author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 20:38:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 20:38:46 -0400
commit     7a1e8b80fb1e8ead4cec15d1fc494ed290e4d2e9
tree       55a36d4256f1ae793b5c8e88c0f158737447193f
parent     a867d7349e94b6409b08629886a819f802377e91
parent     7616ac70d1bb4f2e9d25c1a82d283f3368a7b632
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security
Pull security subsystem updates from James Morris:
"Highlights:
- TPM core and driver updates/fixes
- IPv6 security labeling (CALIPSO)
- Lots of Apparmor fixes
- Seccomp: remove 2-phase API, close hole where ptrace can change
syscall #"
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security: (156 commits)
apparmor: fix SECURITY_APPARMOR_HASH_DEFAULT parameter handling
tpm: Add TPM 2.0 support to the Nuvoton i2c driver (NPCT6xx family)
tpm: Factor out common startup code
tpm: use devm_add_action_or_reset
tpm2_i2c_nuvoton: add irq validity check
tpm: read burstcount from TPM_STS in one 32-bit transaction
tpm: fix byte-order for the value read by tpm2_get_tpm_pt
tpm_tis_core: convert max timeouts from msec to jiffies
apparmor: fix arg_size computation for when setprocattr is null terminated
apparmor: fix oops, validate buffer size in apparmor_setprocattr()
apparmor: do not expose kernel stack
apparmor: fix module parameters can be changed after policy is locked
apparmor: fix oops in profile_unpack() when policy_db is not present
apparmor: don't check for vmalloc_addr if kvzalloc() failed
apparmor: add missing id bounds check on dfa verification
apparmor: allow SYS_CAP_RESOURCE to be sufficient to prlimit another task
apparmor: use list_next_entry instead of list_entry_next
apparmor: fix refcount race when finding a child profile
apparmor: fix ref count leak when profile sha1 hash is read
apparmor: check that xindex is in trans_table bounds
...
126 files changed, 7281 insertions, 2131 deletions
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index acc5cd64711c..a92d4f3e7044 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -126,6 +126,7 @@ national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware M
 national,lm85		Temperature sensor with integrated fan control
 national,lm92		±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
 nuvoton,npct501		i2c trusted platform module (TPM)
+nuvoton,npct601		i2c trusted platform module (TPM2)
 nxp,pca9556		Octal SMBus and I2C registered interface
 nxp,pca9557		8-bit I2C-bus and SMBus I/O port with reset
 nxp,pcf8563		Real-time clock/calendar
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt
new file mode 100644
index 000000000000..85741cd468cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt
@@ -0,0 +1,24 @@
+Required properties:
+- compatible: should be one of the following
+    "st,st33htpm-spi"
+    "infineon,slb9670"
+    "tcg,tpm_tis-spi"
+- spi-max-frequency: Maximum SPI frequency (depends on TPMs).
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pintctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with TPM_TIS on SPI4):
+
+&mcspi4 {
+
+        status = "okay";
+
+        tpm_tis@0 {
+
+                compatible = "tcg,tpm_tis-spi";
+
+                spi-max-frequency = <10000000>;
+        };
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d2bce2239769..b9361816fc32 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -128,6 +128,7 @@ idt	Integrated Device Technologies, Inc.
 ifi	Ingenieurburo Fur Ic-Technologie (I/F/I)
 iom	Iomega Corporation
 img	Imagination Technologies Ltd.
+infineon	Infineon Technologies
 inforce	Inforce Computing
 ingenic	Ingenic Semiconductor
 innolux	Innolux Corporation
@@ -255,6 +256,7 @@ syna	Synaptics Inc.
 synology	Synology, Inc.
 SUNW	Sun Microsystems, Inc
 tbs	TBS Technologies
+tcg	Trusted Computing Group
 tcl	Toby Churchill Ltd.
 technexion	TechNexion
 technologic	Technologic Systems
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 9369d3b0f09a..56af5e43e9c0 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -303,6 +303,7 @@ Code  Seq#(hex)	Include File		Comments
 					<mailto:buk@buks.ipn.de>
 0xA0	all	linux/sdp/sdp.h		Industrial Device Project
 					<mailto:kenji@bitgate.com>
+0xA1	0	linux/vtpm_proxy.h	TPM Emulator Proxy Driver
 0xA2	00-0F	arch/tile/include/asm/hardwall.h
 0xA3	80-8F	Port ACL		in development:
 					<mailto:tlewis@mindspring.com>
diff --git a/Documentation/tpm/tpm_vtpm_proxy.txt b/Documentation/tpm/tpm_vtpm_proxy.txt
new file mode 100644
index 000000000000..30d19022f869
--- /dev/null
+++ b/Documentation/tpm/tpm_vtpm_proxy.txt
@@ -0,0 +1,71 @@
+Virtual TPM Proxy Driver for Linux Containers
+
+Authors: Stefan Berger (IBM)
+
+This document describes the virtual Trusted Platform Module (vTPM)
+proxy device driver for Linux containers.
+
+INTRODUCTION
+------------
+
+The goal of this work is to provide TPM functionality to each Linux
+container. This allows programs to interact with a TPM in a container
+the same way they interact with a TPM on the physical system. Each
+container gets its own unique, emulated, software TPM.
+
+
+DESIGN
+------
+
+To make an emulated software TPM available to each container, the container
+management stack needs to create a device pair consisting of a client TPM
+character device /dev/tpmX (with X=0,1,2...) and a 'server side' file
+descriptor. The former is moved into the container by creating a character
+device with the appropriate major and minor numbers while the file descriptor
+is passed to the TPM emulator. Software inside the container can then send
+TPM commands using the character device and the emulator will receive the
+commands via the file descriptor and use it for sending back responses.
+
+To support this, the virtual TPM proxy driver provides a device /dev/vtpmx
+that is used to create device pairs using an ioctl. The ioctl takes as
+an input flags for configuring the device. The flags for example indicate
+whether TPM 1.2 or TPM 2 functionality is supported by the TPM emulator.
+The result of the ioctl are the file descriptor for the 'server side'
+as well as the major and minor numbers of the character device that was created.
+Besides that the number of the TPM character device is return. If for
+example /dev/tpm10 was created, the number (dev_num) 10 is returned.
+
+The following is the data structure of the TPM_PROXY_IOC_NEW_DEV ioctl:
+
+struct vtpm_proxy_new_dev {
+	__u32 flags;         /* input */
+	__u32 tpm_num;       /* output */
+	__u32 fd;            /* output */
+	__u32 major;         /* output */
+	__u32 minor;         /* output */
+};
+
+Note that if unsupported flags are passed to the device driver, the ioctl will
+fail and errno will be set to EOPNOTSUPP. Similarly, if an unsupported ioctl is
+called on the device driver, the ioctl will fail and errno will be set to
+ENOTTY.
+
+See /usr/include/linux/vtpm_proxy.h for definitions related to the public interface
+of this vTPM device driver.
+
+Once the device has been created, the driver will immediately try to talk
+to the TPM. All commands from the driver can be read from the file descriptor
+returned by the ioctl. The commands should be responded to immediately.
+
+Depending on the version of TPM the following commands will be sent by the
+driver:
+
+- TPM 1.2:
+	- the driver will send a TPM_Startup command to the TPM emulator
+	- the driver will send commands to read the command durations and
+	  interface timeouts from the TPM emulator
+- TPM 2:
+	- the driver will send a TPM2_Startup command to the TPM emulator
+
+The TPM device /dev/tpmX will only appear if all of the relevant commands
+were responded to properly.
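For context, a minimal userspace sketch (not part of the patch above) of the device-pair creation described in tpm_vtpm_proxy.txt: open /dev/vtpmx, issue the new-device ioctl, and hand the returned 'server side' descriptor to the TPM emulator. The struct layout is copied from the document, the ioctl code 0xA1/0 comes from the ioctl-number.txt hunk above, and the _IOWR direction is an assumption; real code should include <linux/vtpm_proxy.h> and use the request macro defined there.

/*
 * Illustrative sketch only: create a vTPM device pair via /dev/vtpmx.
 * The struct layout is copied from the document above and the ioctl
 * code 0xA1/0 from ioctl-number.txt; the request macro below is a
 * local guess, the authoritative one lives in <linux/vtpm_proxy.h>.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct vtpm_proxy_new_dev {
	__u32 flags;		/* input */
	__u32 tpm_num;		/* output */
	__u32 fd;		/* output */
	__u32 major;		/* output */
	__u32 minor;		/* output */
};

/* Assumed direction (_IOWR): flags go in, the other fields come back. */
#define VTPM_PROXY_IOC_NEW_DEV_SKETCH _IOWR(0xA1, 0x00, struct vtpm_proxy_new_dev)

int main(void)
{
	struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
	int vtpmx_fd = open("/dev/vtpmx", O_RDWR);

	if (vtpmx_fd < 0) {
		perror("open /dev/vtpmx");
		return 1;
	}

	/* Unsupported flags make this fail with EOPNOTSUPP (see above). */
	if (ioctl(vtpmx_fd, VTPM_PROXY_IOC_NEW_DEV_SKETCH, &new_dev) < 0) {
		perror("ioctl");
		close(vtpmx_fd);
		return 1;
	}

	/*
	 * new_dev.fd is the 'server side' descriptor handed to the TPM
	 * emulator; the client device is /dev/tpm<tpm_num> with the
	 * returned major:minor numbers.
	 */
	printf("server fd %u, client /dev/tpm%u (%u:%u)\n",
	       new_dev.fd, new_dev.tpm_num, new_dev.major, new_dev.minor);

	close(vtpmx_fd);
	return 0;
}

Once the ioctl succeeds, the emulator services commands on new_dev.fd and, after answering the startup commands listed above, /dev/tpm<tpm_num> becomes usable inside the container.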
diff --git a/MAINTAINERS b/MAINTAINERS
index febb29c4d0ca..256f56bbb2ad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2837,7 +2837,7 @@ F:	include/uapi/linux/can/error.h
 F:	include/uapi/linux/can/netlink.h
 
 CAPABILITIES
-M:	Serge Hallyn <serge.hallyn@canonical.com>
+M:	Serge Hallyn <serge@hallyn.com>
 L:	linux-security-module@vger.kernel.org
 S:	Supported
 F:	include/linux/capability.h
@@ -10675,7 +10675,7 @@ SMACK SECURITY MODULE
 M:	Casey Schaufler <casey@schaufler-ca.com>
 L:	linux-security-module@vger.kernel.org
 W:	http://schaufler-ca.com
-T:	git git://git.gitorious.org/smack-next/kernel.git
+T:	git git://github.com/cschaufler/smack-next
 S:	Maintained
 F:	Documentation/security/Smack.txt
 F:	security/smack/
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 4d9375814b53..ce131ed5939d 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -932,18 +932,19 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
 	current_thread_info()->syscall = scno;
 
-	/* Do the secure computing check first; failures should be fast. */
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+	/* Do seccomp after ptrace; syscall may have changed. */
 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-	if (secure_computing() == -1)
+	if (secure_computing(NULL) == -1)
 		return -1;
 #else
 	/* XXX: remove this once OABI gets fixed */
-	secure_computing_strict(scno);
+	secure_computing_strict(current_thread_info()->syscall);
 #endif
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE))
-		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
-
+	/* Tracer or seccomp may have changed syscall. */
 	scno = current_thread_info()->syscall;
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 030c1d5aa46d..e0c81da60f76 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1347,13 +1347,13 @@ static void tracehook_report_syscall(struct pt_regs *regs,
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 {
-	/* Do the secure computing check first; failures should be fast. */
-	if (secure_computing() == -1)
-		return -1;
-
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
 
+	/* Do the secure computing after ptrace; failures should be fast. */
+	if (secure_computing(NULL) == -1)
+		return -1;
+
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, regs->syscallno);
 
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0dcf69194473..6103b24d1bfc 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -888,17 +888,16 @@ long arch_ptrace(struct task_struct *child, long request,
  */
 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 {
-	long ret = 0;
 	user_exit();
 
 	current_thread_info()->syscall = syscall;
 
-	if (secure_computing() == -1)
-		return -1;
-
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(regs))
-		ret = -1;
+		return -1;
+
+	if (secure_computing(NULL) == -1)
+		return -1;
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->regs[2]);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index b5458b37fc5b..e02d7b4d2b69 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -311,10 +311,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	/* Do the secure computing check first. */
-	if (secure_computing() == -1)
-		return -1;
-
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(regs)) {
 		/*
@@ -325,6 +321,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 		regs->gr[20] = -1UL;
 		goto out;
 	}
+
+	/* Do the secure computing check after ptrace. */
+	if (secure_computing(NULL) == -1)
+		return -1;
+
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->gr[20]);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 060b140f03c6..134bee9ac664 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1783,12 +1783,12 @@ static int do_seccomp(struct pt_regs *regs) | |||
1783 | * have already loaded -ENOSYS into r3, or seccomp has put | 1783 | * have already loaded -ENOSYS into r3, or seccomp has put |
1784 | * something else in r3 (via SECCOMP_RET_ERRNO/TRACE). | 1784 | * something else in r3 (via SECCOMP_RET_ERRNO/TRACE). |
1785 | */ | 1785 | */ |
1786 | if (__secure_computing()) | 1786 | if (__secure_computing(NULL)) |
1787 | return -1; | 1787 | return -1; |
1788 | 1788 | ||
1789 | /* | 1789 | /* |
1790 | * The syscall was allowed by seccomp, restore the register | 1790 | * The syscall was allowed by seccomp, restore the register |
1791 | * state to what ptrace and audit expect. | 1791 | * state to what audit expects. |
1792 | * Note that we use orig_gpr3, which means a seccomp tracer can | 1792 | * Note that we use orig_gpr3, which means a seccomp tracer can |
1793 | * modify the first syscall parameter (in orig_gpr3) and also | 1793 | * modify the first syscall parameter (in orig_gpr3) and also |
1794 | * allow the syscall to proceed. | 1794 | * allow the syscall to proceed. |
@@ -1822,22 +1822,25 @@ static inline int do_seccomp(struct pt_regs *regs) { return 0; } | |||
1822 | */ | 1822 | */ |
1823 | long do_syscall_trace_enter(struct pt_regs *regs) | 1823 | long do_syscall_trace_enter(struct pt_regs *regs) |
1824 | { | 1824 | { |
1825 | bool abort = false; | ||
1826 | |||
1827 | user_exit(); | 1825 | user_exit(); |
1828 | 1826 | ||
1827 | /* | ||
1828 | * The tracer may decide to abort the syscall, if so tracehook | ||
1829 | * will return !0. Note that the tracer may also just change | ||
1830 | * regs->gpr[0] to an invalid syscall number, that is handled | ||
1831 | * below on the exit path. | ||
1832 | */ | ||
1833 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | ||
1834 | tracehook_report_syscall_entry(regs)) | ||
1835 | goto skip; | ||
1836 | |||
1837 | /* Run seccomp after ptrace; allow it to set gpr[3]. */ | ||
1829 | if (do_seccomp(regs)) | 1838 | if (do_seccomp(regs)) |
1830 | return -1; | 1839 | return -1; |
1831 | 1840 | ||
1832 | if (test_thread_flag(TIF_SYSCALL_TRACE)) { | 1841 | /* Avoid trace and audit when syscall is invalid. */ |
1833 | /* | 1842 | if (regs->gpr[0] >= NR_syscalls) |
1834 | * The tracer may decide to abort the syscall, if so tracehook | 1843 | goto skip; |
1835 | * will return !0. Note that the tracer may also just change | ||
1836 | * regs->gpr[0] to an invalid syscall number, that is handled | ||
1837 | * below on the exit path. | ||
1838 | */ | ||
1839 | abort = tracehook_report_syscall_entry(regs) != 0; | ||
1840 | } | ||
1841 | 1844 | ||
1842 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | 1845 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
1843 | trace_sys_enter(regs, regs->gpr[0]); | 1846 | trace_sys_enter(regs, regs->gpr[0]); |
@@ -1854,17 +1857,16 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1854 | regs->gpr[5] & 0xffffffff, | 1857 | regs->gpr[5] & 0xffffffff, |
1855 | regs->gpr[6] & 0xffffffff); | 1858 | regs->gpr[6] & 0xffffffff); |
1856 | 1859 | ||
1857 | if (abort || regs->gpr[0] >= NR_syscalls) { | ||
1858 | /* | ||
1859 | * If we are aborting explicitly, or if the syscall number is | ||
1860 | * now invalid, set the return value to -ENOSYS. | ||
1861 | */ | ||
1862 | regs->gpr[3] = -ENOSYS; | ||
1863 | return -1; | ||
1864 | } | ||
1865 | |||
1866 | /* Return the possibly modified but valid syscall number */ | 1860 | /* Return the possibly modified but valid syscall number */ |
1867 | return regs->gpr[0]; | 1861 | return regs->gpr[0]; |
1862 | |||
1863 | skip: | ||
1864 | /* | ||
1865 | * If we are aborting explicitly, or if the syscall number is | ||
1866 | * now invalid, set the return value to -ENOSYS. | ||
1867 | */ | ||
1868 | regs->gpr[3] = -ENOSYS; | ||
1869 | return -1; | ||
1868 | } | 1870 | } |
1869 | 1871 | ||
1870 | void do_syscall_trace_leave(struct pt_regs *regs) | 1872 | void do_syscall_trace_leave(struct pt_regs *regs) |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 49b1c13bf6c9..cea17010448f 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -821,15 +821,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
821 | 821 | ||
822 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | 822 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) |
823 | { | 823 | { |
824 | long ret = 0; | ||
825 | |||
826 | /* Do the secure computing check first. */ | ||
827 | if (secure_computing()) { | ||
828 | /* seccomp failures shouldn't expose any additional code. */ | ||
829 | ret = -1; | ||
830 | goto out; | ||
831 | } | ||
832 | |||
833 | /* | 824 | /* |
834 | * The sysc_tracesys code in entry.S stored the system | 825 | * The sysc_tracesys code in entry.S stored the system |
835 | * call number to gprs[2]. | 826 | * call number to gprs[2]. |
@@ -843,7 +834,13 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
843 | * the system call and the system call restart handling. | 834 | * the system call and the system call restart handling. |
844 | */ | 835 | */ |
845 | clear_pt_regs_flag(regs, PIF_SYSCALL); | 836 | clear_pt_regs_flag(regs, PIF_SYSCALL); |
846 | ret = -1; | 837 | return -1; |
838 | } | ||
839 | |||
840 | /* Do the secure computing check after ptrace. */ | ||
841 | if (secure_computing(NULL)) { | ||
842 | /* seccomp failures shouldn't expose any additional code. */ | ||
843 | return -1; | ||
847 | } | 844 | } |
848 | 845 | ||
849 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | 846 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
@@ -852,8 +849,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
852 | audit_syscall_entry(regs->gprs[2], regs->orig_gpr2, | 849 | audit_syscall_entry(regs->gprs[2], regs->orig_gpr2, |
853 | regs->gprs[3], regs->gprs[4], | 850 | regs->gprs[3], regs->gprs[4], |
854 | regs->gprs[5]); | 851 | regs->gprs[5]); |
855 | out: | 852 | |
856 | return ret ?: regs->gprs[2]; | 853 | return regs->gprs[2]; |
857 | } | 854 | } |
858 | 855 | ||
859 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) | 856 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) |
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 54e7b723db99..d89b7011667c 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,14 +255,15 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 {
 	u32 work = ACCESS_ONCE(current_thread_info()->flags);
 
-	if (secure_computing() == -1)
+	if ((work & _TIF_SYSCALL_TRACE) &&
+	    tracehook_report_syscall_entry(regs)) {
+		regs->regs[TREG_SYSCALL_NR] = -1;
 		return -1;
-
-	if (work & _TIF_SYSCALL_TRACE) {
-		if (tracehook_report_syscall_entry(regs))
-			regs->regs[TREG_SYSCALL_NR] = -1;
 	}
 
+	if (secure_computing(NULL) == -1)
+		return -1;
+
 	if (work & _TIF_SYSCALL_TRACEPOINT)
 		trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
 
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 48b0dcbd87be..ef4b8f949b51 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -20,12 +20,12 @@ void handle_syscall(struct uml_pt_regs *r)
 	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
-	/* Do the secure computing check first; failures should be fast. */
-	if (secure_computing() == -1)
+	if (syscall_trace_enter(regs))
 		return;
 
-	if (syscall_trace_enter(regs))
-		goto out;
+	/* Do the seccomp check after ptrace; failures should be fast. */
+	if (secure_computing(NULL) == -1)
+		return;
 
 	/* Update the syscall number after orig_ax has potentially been updated
 	 * with ptrace.
@@ -37,6 +37,5 @@ void handle_syscall(struct uml_pt_regs *r)
 	PT_REGS_SET_SYSCALL_RETURN(regs,
 				   EXECUTE_SYSCALL(syscall, regs));
 
-out:
 	syscall_trace_leave(regs);
 }
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 9e1e27d31c6d..a1e71d431fed 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -64,22 +64,16 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) | |||
64 | } | 64 | } |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * We can return 0 to resume the syscall or anything else to go to phase | 67 | * Returns the syscall nr to run (which should match regs->orig_ax) or -1 |
68 | * 2. If we resume the syscall, we need to put something appropriate in | 68 | * to skip the syscall. |
69 | * regs->orig_ax. | ||
70 | * | ||
71 | * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax | ||
72 | * are fully functional. | ||
73 | * | ||
74 | * For phase 2's benefit, our return value is: | ||
75 | * 0: resume the syscall | ||
76 | * 1: go to phase 2; no seccomp phase 2 needed | ||
77 | * anything else: go to phase 2; pass return value to seccomp | ||
78 | */ | 69 | */ |
79 | unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) | 70 | static long syscall_trace_enter(struct pt_regs *regs) |
80 | { | 71 | { |
72 | u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; | ||
73 | |||
81 | struct thread_info *ti = pt_regs_to_thread_info(regs); | 74 | struct thread_info *ti = pt_regs_to_thread_info(regs); |
82 | unsigned long ret = 0; | 75 | unsigned long ret = 0; |
76 | bool emulated = false; | ||
83 | u32 work; | 77 | u32 work; |
84 | 78 | ||
85 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) | 79 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) |
@@ -87,11 +81,19 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) | |||
87 | 81 | ||
88 | work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; | 82 | work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; |
89 | 83 | ||
84 | if (unlikely(work & _TIF_SYSCALL_EMU)) | ||
85 | emulated = true; | ||
86 | |||
87 | if ((emulated || (work & _TIF_SYSCALL_TRACE)) && | ||
88 | tracehook_report_syscall_entry(regs)) | ||
89 | return -1L; | ||
90 | |||
91 | if (emulated) | ||
92 | return -1L; | ||
93 | |||
90 | #ifdef CONFIG_SECCOMP | 94 | #ifdef CONFIG_SECCOMP |
91 | /* | 95 | /* |
92 | * Do seccomp first -- it should minimize exposure of other | 96 | * Do seccomp after ptrace, to catch any tracer changes. |
93 | * code, and keeping seccomp fast is probably more valuable | ||
94 | * than the rest of this. | ||
95 | */ | 97 | */ |
96 | if (work & _TIF_SECCOMP) { | 98 | if (work & _TIF_SECCOMP) { |
97 | struct seccomp_data sd; | 99 | struct seccomp_data sd; |
@@ -118,69 +120,12 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) | |||
118 | sd.args[5] = regs->bp; | 120 | sd.args[5] = regs->bp; |
119 | } | 121 | } |
120 | 122 | ||
121 | BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0); | 123 | ret = __secure_computing(&sd); |
122 | BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1); | 124 | if (ret == -1) |
123 | 125 | return ret; | |
124 | ret = seccomp_phase1(&sd); | ||
125 | if (ret == SECCOMP_PHASE1_SKIP) { | ||
126 | regs->orig_ax = -1; | ||
127 | ret = 0; | ||
128 | } else if (ret != SECCOMP_PHASE1_OK) { | ||
129 | return ret; /* Go directly to phase 2 */ | ||
130 | } | ||
131 | |||
132 | work &= ~_TIF_SECCOMP; | ||
133 | } | ||
134 | #endif | ||
135 | |||
136 | /* Do our best to finish without phase 2. */ | ||
137 | if (work == 0) | ||
138 | return ret; /* seccomp and/or nohz only (ret == 0 here) */ | ||
139 | |||
140 | #ifdef CONFIG_AUDITSYSCALL | ||
141 | if (work == _TIF_SYSCALL_AUDIT) { | ||
142 | /* | ||
143 | * If there is no more work to be done except auditing, | ||
144 | * then audit in phase 1. Phase 2 always audits, so, if | ||
145 | * we audit here, then we can't go on to phase 2. | ||
146 | */ | ||
147 | do_audit_syscall_entry(regs, arch); | ||
148 | return 0; | ||
149 | } | 126 | } |
150 | #endif | 127 | #endif |
151 | 128 | ||
152 | return 1; /* Something is enabled that we can't handle in phase 1 */ | ||
153 | } | ||
154 | |||
155 | /* Returns the syscall nr to run (which should match regs->orig_ax). */ | ||
156 | long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, | ||
157 | unsigned long phase1_result) | ||
158 | { | ||
159 | struct thread_info *ti = pt_regs_to_thread_info(regs); | ||
160 | long ret = 0; | ||
161 | u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; | ||
162 | |||
163 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) | ||
164 | BUG_ON(regs != task_pt_regs(current)); | ||
165 | |||
166 | #ifdef CONFIG_SECCOMP | ||
167 | /* | ||
168 | * Call seccomp_phase2 before running the other hooks so that | ||
169 | * they can see any changes made by a seccomp tracer. | ||
170 | */ | ||
171 | if (phase1_result > 1 && seccomp_phase2(phase1_result)) { | ||
172 | /* seccomp failures shouldn't expose any additional code. */ | ||
173 | return -1; | ||
174 | } | ||
175 | #endif | ||
176 | |||
177 | if (unlikely(work & _TIF_SYSCALL_EMU)) | ||
178 | ret = -1L; | ||
179 | |||
180 | if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && | ||
181 | tracehook_report_syscall_entry(regs)) | ||
182 | ret = -1L; | ||
183 | |||
184 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | 129 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
185 | trace_sys_enter(regs, regs->orig_ax); | 130 | trace_sys_enter(regs, regs->orig_ax); |
186 | 131 | ||
@@ -189,17 +134,6 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, | |||
189 | return ret ?: regs->orig_ax; | 134 | return ret ?: regs->orig_ax; |
190 | } | 135 | } |
191 | 136 | ||
192 | long syscall_trace_enter(struct pt_regs *regs) | ||
193 | { | ||
194 | u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; | ||
195 | unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch); | ||
196 | |||
197 | if (phase1_result == 0) | ||
198 | return regs->orig_ax; | ||
199 | else | ||
200 | return syscall_trace_enter_phase2(regs, arch, phase1_result); | ||
201 | } | ||
202 | |||
203 | #define EXIT_TO_USERMODE_LOOP_FLAGS \ | 137 | #define EXIT_TO_USERMODE_LOOP_FLAGS \ |
204 | (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ | 138 | (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ |
205 | _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) | 139 | _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) |
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 75fc719b7f31..636c4b341f36 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -207,7 +207,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 	 */
 	regs->orig_ax = syscall_nr;
 	regs->ax = -ENOSYS;
-	tmp = secure_computing();
+	tmp = secure_computing(NULL);
 	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
 		warn_bad_vsyscall(KERN_DEBUG, regs,
 				  "seccomp tried to change syscall nr or ip");
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6271281f947d..2b5d686ea9f3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -83,12 +83,6 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 			 int error_code, int si_code);
 
 
-extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
-extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
-				       unsigned long phase1_result);
-
-extern long syscall_trace_enter(struct pt_regs *);
-
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
 	return regs->ax;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 3b84a8b1bfbe..9faa0b1e7766 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -24,9 +24,16 @@ menuconfig TCG_TPM | |||
24 | 24 | ||
25 | if TCG_TPM | 25 | if TCG_TPM |
26 | 26 | ||
27 | config TCG_TIS_CORE | ||
28 | tristate | ||
29 | ---help--- | ||
30 | TCG TIS TPM core driver. It implements the TPM TCG TIS logic and hooks | ||
31 | into the TPM kernel APIs. Physical layers will register against it. | ||
32 | |||
27 | config TCG_TIS | 33 | config TCG_TIS |
28 | tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface" | 34 | tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface" |
29 | depends on X86 | 35 | depends on X86 |
36 | select TCG_TIS_CORE | ||
30 | ---help--- | 37 | ---help--- |
31 | If you have a TPM security chip that is compliant with the | 38 | If you have a TPM security chip that is compliant with the |
32 | TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO | 39 | TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO |
@@ -34,6 +41,18 @@ config TCG_TIS | |||
34 | within Linux. To compile this driver as a module, choose M here; | 41 | within Linux. To compile this driver as a module, choose M here; |
35 | the module will be called tpm_tis. | 42 | the module will be called tpm_tis. |
36 | 43 | ||
44 | config TCG_TIS_SPI | ||
45 | tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (SPI)" | ||
46 | depends on SPI | ||
47 | select TCG_TIS_CORE | ||
48 | ---help--- | ||
49 | If you have a TPM security chip which is connected to a regular, | ||
50 | non-tcg SPI master (i.e. most embedded platforms) that is compliant with the | ||
51 | TCG TIS 1.3 TPM specification (TPM1.2) or the TCG PTP FIFO | ||
52 | specification (TPM2.0) say Yes and it will be accessible from | ||
53 | within Linux. To compile this driver as a module, choose M here; | ||
54 | the module will be called tpm_tis_spi. | ||
55 | |||
37 | config TCG_TIS_I2C_ATMEL | 56 | config TCG_TIS_I2C_ATMEL |
38 | tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" | 57 | tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" |
39 | depends on I2C | 58 | depends on I2C |
@@ -122,5 +141,16 @@ config TCG_CRB | |||
122 | from within Linux. To compile this driver as a module, choose | 141 | from within Linux. To compile this driver as a module, choose |
123 | M here; the module will be called tpm_crb. | 142 | M here; the module will be called tpm_crb. |
124 | 143 | ||
144 | config TCG_VTPM_PROXY | ||
145 | tristate "VTPM Proxy Interface" | ||
146 | depends on TCG_TPM | ||
147 | select ANON_INODES | ||
148 | ---help--- | ||
149 | This driver proxies for an emulated TPM (vTPM) running in userspace. | ||
150 | A device /dev/vtpmx is provided that creates a device pair | ||
151 | /dev/vtpmX and a server-side file descriptor on which the vTPM | ||
152 | can receive commands. | ||
153 | |||
154 | |||
125 | source "drivers/char/tpm/st33zp24/Kconfig" | 155 | source "drivers/char/tpm/st33zp24/Kconfig" |
126 | endif # TCG_TPM | 156 | endif # TCG_TPM |
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 56e8f1f3dc7e..a385fb8c17de 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -12,7 +12,9 @@ ifdef CONFIG_TCG_IBMVTPM
 	tpm-y += tpm_eventlog.o tpm_of.o
 endif
 endif
+obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
 obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
 obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
 obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
 obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
@@ -23,3 +25,4 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
 obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
 obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
 obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
index 19c007461d1c..e74c6f29fc85 100644
--- a/drivers/char/tpm/st33zp24/Kconfig
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -1,6 +1,5 @@ | |||
1 | config TCG_TIS_ST33ZP24 | 1 | config TCG_TIS_ST33ZP24 |
2 | tristate "STMicroelectronics TPM Interface Specification 1.2 Interface" | 2 | tristate |
3 | depends on GPIOLIB || COMPILE_TEST | ||
4 | ---help--- | 3 | ---help--- |
5 | STMicroelectronics ST33ZP24 core driver. It implements the core | 4 | STMicroelectronics ST33ZP24 core driver. It implements the core |
6 | TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will | 5 | TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will |
@@ -10,9 +9,9 @@ config TCG_TIS_ST33ZP24 | |||
10 | tpm_st33zp24. | 9 | tpm_st33zp24. |
11 | 10 | ||
12 | config TCG_TIS_ST33ZP24_I2C | 11 | config TCG_TIS_ST33ZP24_I2C |
13 | tristate "TPM 1.2 ST33ZP24 I2C support" | 12 | tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (I2C)" |
14 | depends on TCG_TIS_ST33ZP24 | ||
15 | depends on I2C | 13 | depends on I2C |
14 | select TCG_TIS_ST33ZP24 | ||
16 | ---help--- | 15 | ---help--- |
17 | This module adds support for the STMicroelectronics TPM security chip | 16 | This module adds support for the STMicroelectronics TPM security chip |
18 | ST33ZP24 with i2c interface. | 17 | ST33ZP24 with i2c interface. |
@@ -20,9 +19,9 @@ config TCG_TIS_ST33ZP24_I2C | |||
20 | called tpm_st33zp24_i2c. | 19 | called tpm_st33zp24_i2c. |
21 | 20 | ||
22 | config TCG_TIS_ST33ZP24_SPI | 21 | config TCG_TIS_ST33ZP24_SPI |
23 | tristate "TPM 1.2 ST33ZP24 SPI support" | 22 | tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (SPI)" |
24 | depends on TCG_TIS_ST33ZP24 | ||
25 | depends on SPI | 23 | depends on SPI |
24 | select TCG_TIS_ST33ZP24 | ||
26 | ---help--- | 25 | ---help--- |
27 | This module adds support for the STMicroelectronics TPM security chip | 26 | This module adds support for the STMicroelectronics TPM security chip |
28 | ST33ZP24 with spi interface. | 27 | ST33ZP24 with spi interface. |
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 309d2767c6a1..028a9cd76b63 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 | 2 | * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 |
3 | * Copyright (C) 2009 - 2015 STMicroelectronics | 3 | * Copyright (C) 2009 - 2016 STMicroelectronics |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -19,11 +19,14 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/i2c.h> | 20 | #include <linux/i2c.h> |
21 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
22 | #include <linux/gpio/consumer.h> | ||
22 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
23 | #include <linux/of_gpio.h> | 24 | #include <linux/of_gpio.h> |
25 | #include <linux/acpi.h> | ||
24 | #include <linux/tpm.h> | 26 | #include <linux/tpm.h> |
25 | #include <linux/platform_data/st33zp24.h> | 27 | #include <linux/platform_data/st33zp24.h> |
26 | 28 | ||
29 | #include "../tpm.h" | ||
27 | #include "st33zp24.h" | 30 | #include "st33zp24.h" |
28 | 31 | ||
29 | #define TPM_DUMMY_BYTE 0xAA | 32 | #define TPM_DUMMY_BYTE 0xAA |
@@ -108,11 +111,40 @@ static const struct st33zp24_phy_ops i2c_phy_ops = { | |||
108 | .recv = st33zp24_i2c_recv, | 111 | .recv = st33zp24_i2c_recv, |
109 | }; | 112 | }; |
110 | 113 | ||
111 | #ifdef CONFIG_OF | 114 | static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client) |
112 | static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) | ||
113 | { | 115 | { |
116 | struct tpm_chip *chip = i2c_get_clientdata(client); | ||
117 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
118 | struct st33zp24_i2c_phy *phy = tpm_dev->phy_id; | ||
119 | struct gpio_desc *gpiod_lpcpd; | ||
120 | struct device *dev = &client->dev; | ||
121 | |||
122 | /* Get LPCPD GPIO from ACPI */ | ||
123 | gpiod_lpcpd = devm_gpiod_get_index(dev, "TPM IO LPCPD", 1, | ||
124 | GPIOD_OUT_HIGH); | ||
125 | if (IS_ERR(gpiod_lpcpd)) { | ||
126 | dev_err(&client->dev, | ||
127 | "Failed to retrieve lpcpd-gpios from acpi.\n"); | ||
128 | phy->io_lpcpd = -1; | ||
129 | /* | ||
130 | * lpcpd pin is not specified. This is not an issue as | ||
131 | * power management can be also managed by TPM specific | ||
132 | * commands. So leave with a success status code. | ||
133 | */ | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static int st33zp24_i2c_of_request_resources(struct i2c_client *client) | ||
143 | { | ||
144 | struct tpm_chip *chip = i2c_get_clientdata(client); | ||
145 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
146 | struct st33zp24_i2c_phy *phy = tpm_dev->phy_id; | ||
114 | struct device_node *pp; | 147 | struct device_node *pp; |
115 | struct i2c_client *client = phy->client; | ||
116 | int gpio; | 148 | int gpio; |
117 | int ret; | 149 | int ret; |
118 | 150 | ||
@@ -146,16 +178,12 @@ static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) | |||
146 | 178 | ||
147 | return 0; | 179 | return 0; |
148 | } | 180 | } |
149 | #else | ||
150 | static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) | ||
151 | { | ||
152 | return -ENODEV; | ||
153 | } | ||
154 | #endif | ||
155 | 181 | ||
156 | static int st33zp24_i2c_request_resources(struct i2c_client *client, | 182 | static int st33zp24_i2c_request_resources(struct i2c_client *client) |
157 | struct st33zp24_i2c_phy *phy) | ||
158 | { | 183 | { |
184 | struct tpm_chip *chip = i2c_get_clientdata(client); | ||
185 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
186 | struct st33zp24_i2c_phy *phy = tpm_dev->phy_id; | ||
159 | struct st33zp24_platform_data *pdata; | 187 | struct st33zp24_platform_data *pdata; |
160 | int ret; | 188 | int ret; |
161 | 189 | ||
@@ -212,13 +240,18 @@ static int st33zp24_i2c_probe(struct i2c_client *client, | |||
212 | return -ENOMEM; | 240 | return -ENOMEM; |
213 | 241 | ||
214 | phy->client = client; | 242 | phy->client = client; |
243 | |||
215 | pdata = client->dev.platform_data; | 244 | pdata = client->dev.platform_data; |
216 | if (!pdata && client->dev.of_node) { | 245 | if (!pdata && client->dev.of_node) { |
217 | ret = st33zp24_i2c_of_request_resources(phy); | 246 | ret = st33zp24_i2c_of_request_resources(client); |
218 | if (ret) | 247 | if (ret) |
219 | return ret; | 248 | return ret; |
220 | } else if (pdata) { | 249 | } else if (pdata) { |
221 | ret = st33zp24_i2c_request_resources(client, phy); | 250 | ret = st33zp24_i2c_request_resources(client); |
251 | if (ret) | ||
252 | return ret; | ||
253 | } else if (ACPI_HANDLE(&client->dev)) { | ||
254 | ret = st33zp24_i2c_acpi_request_resources(client); | ||
222 | if (ret) | 255 | if (ret) |
223 | return ret; | 256 | return ret; |
224 | } | 257 | } |
@@ -245,13 +278,17 @@ static const struct i2c_device_id st33zp24_i2c_id[] = { | |||
245 | }; | 278 | }; |
246 | MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); | 279 | MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); |
247 | 280 | ||
248 | #ifdef CONFIG_OF | ||
249 | static const struct of_device_id of_st33zp24_i2c_match[] = { | 281 | static const struct of_device_id of_st33zp24_i2c_match[] = { |
250 | { .compatible = "st,st33zp24-i2c", }, | 282 | { .compatible = "st,st33zp24-i2c", }, |
251 | {} | 283 | {} |
252 | }; | 284 | }; |
253 | MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match); | 285 | MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match); |
254 | #endif | 286 | |
287 | static const struct acpi_device_id st33zp24_i2c_acpi_match[] = { | ||
288 | {"SMO3324"}, | ||
289 | {} | ||
290 | }; | ||
291 | MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match); | ||
255 | 292 | ||
256 | static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend, | 293 | static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend, |
257 | st33zp24_pm_resume); | 294 | st33zp24_pm_resume); |
@@ -261,6 +298,7 @@ static struct i2c_driver st33zp24_i2c_driver = { | |||
261 | .name = TPM_ST33_I2C, | 298 | .name = TPM_ST33_I2C, |
262 | .pm = &st33zp24_i2c_ops, | 299 | .pm = &st33zp24_i2c_ops, |
263 | .of_match_table = of_match_ptr(of_st33zp24_i2c_match), | 300 | .of_match_table = of_match_ptr(of_st33zp24_i2c_match), |
301 | .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match), | ||
264 | }, | 302 | }, |
265 | .probe = st33zp24_i2c_probe, | 303 | .probe = st33zp24_i2c_probe, |
266 | .remove = st33zp24_i2c_remove, | 304 | .remove = st33zp24_i2c_remove, |
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index f974c945c97a..9f5a0117098c 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24 | 2 | * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24 |
3 | * Copyright (C) 2009 - 2015 STMicroelectronics | 3 | * Copyright (C) 2009 - 2016 STMicroelectronics |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -19,11 +19,14 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/spi/spi.h> | 20 | #include <linux/spi/spi.h> |
21 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
22 | #include <linux/gpio/consumer.h> | ||
22 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
23 | #include <linux/of_gpio.h> | 24 | #include <linux/of_gpio.h> |
25 | #include <linux/acpi.h> | ||
24 | #include <linux/tpm.h> | 26 | #include <linux/tpm.h> |
25 | #include <linux/platform_data/st33zp24.h> | 27 | #include <linux/platform_data/st33zp24.h> |
26 | 28 | ||
29 | #include "../tpm.h" | ||
27 | #include "st33zp24.h" | 30 | #include "st33zp24.h" |
28 | 31 | ||
29 | #define TPM_DATA_FIFO 0x24 | 32 | #define TPM_DATA_FIFO 0x24 |
@@ -66,7 +69,7 @@ | |||
66 | 69 | ||
67 | struct st33zp24_spi_phy { | 70 | struct st33zp24_spi_phy { |
68 | struct spi_device *spi_device; | 71 | struct spi_device *spi_device; |
69 | struct spi_transfer spi_xfer; | 72 | |
70 | u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE]; | 73 | u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE]; |
71 | u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE]; | 74 | u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE]; |
72 | 75 | ||
@@ -110,43 +113,39 @@ static int st33zp24_status_to_errno(u8 code) | |||
110 | static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, | 113 | static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, |
111 | int tpm_size) | 114 | int tpm_size) |
112 | { | 115 | { |
113 | u8 data = 0; | 116 | int total_length = 0, ret = 0; |
114 | int total_length = 0, nbr_dummy_bytes = 0, ret = 0; | ||
115 | struct st33zp24_spi_phy *phy = phy_id; | 117 | struct st33zp24_spi_phy *phy = phy_id; |
116 | struct spi_device *dev = phy->spi_device; | 118 | struct spi_device *dev = phy->spi_device; |
117 | u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf; | 119 | struct spi_transfer spi_xfer = { |
118 | u8 *rx_buf = phy->spi_xfer.rx_buf; | 120 | .tx_buf = phy->tx_buf, |
121 | .rx_buf = phy->rx_buf, | ||
122 | }; | ||
119 | 123 | ||
120 | /* Pre-Header */ | 124 | /* Pre-Header */ |
121 | data = TPM_WRITE_DIRECTION | LOCALITY0; | 125 | phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0; |
122 | memcpy(tx_buf + total_length, &data, sizeof(data)); | 126 | phy->tx_buf[total_length++] = tpm_register; |
123 | total_length++; | ||
124 | data = tpm_register; | ||
125 | memcpy(tx_buf + total_length, &data, sizeof(data)); | ||
126 | total_length++; | ||
127 | 127 | ||
128 | if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) { | 128 | if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) { |
129 | tx_buf[total_length++] = tpm_size >> 8; | 129 | phy->tx_buf[total_length++] = tpm_size >> 8; |
130 | tx_buf[total_length++] = tpm_size; | 130 | phy->tx_buf[total_length++] = tpm_size; |
131 | } | 131 | } |
132 | 132 | ||
133 | memcpy(&tx_buf[total_length], tpm_data, tpm_size); | 133 | memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size); |
134 | total_length += tpm_size; | 134 | total_length += tpm_size; |
135 | 135 | ||
136 | nbr_dummy_bytes = phy->latency; | 136 | memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency); |
137 | memset(&tx_buf[total_length], TPM_DUMMY_BYTE, nbr_dummy_bytes); | ||
138 | 137 | ||
139 | phy->spi_xfer.len = total_length + nbr_dummy_bytes; | 138 | spi_xfer.len = total_length + phy->latency; |
140 | 139 | ||
141 | ret = spi_sync_transfer(dev, &phy->spi_xfer, 1); | 140 | ret = spi_sync_transfer(dev, &spi_xfer, 1); |
142 | if (ret == 0) | 141 | if (ret == 0) |
143 | ret = rx_buf[total_length + nbr_dummy_bytes - 1]; | 142 | ret = phy->rx_buf[total_length + phy->latency - 1]; |
144 | 143 | ||
145 | return st33zp24_status_to_errno(ret); | 144 | return st33zp24_status_to_errno(ret); |
146 | } /* st33zp24_spi_send() */ | 145 | } /* st33zp24_spi_send() */ |
147 | 146 | ||
148 | /* | 147 | /* |
149 | * read8_recv | 148 | * st33zp24_spi_read8_recv |
150 | * Recv byte from the TIS register according to the ST33ZP24 SPI protocol. | 149 | * Recv byte from the TIS register according to the ST33ZP24 SPI protocol. |
151 | * @param: phy_id, the phy description | 150 | * @param: phy_id, the phy description |
152 | * @param: tpm_register, the tpm tis register where the data should be read | 151 | * @param: tpm_register, the tpm tis register where the data should be read |
@@ -154,40 +153,37 @@ static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, | |||
154 | * @param: tpm_size, tpm TPM response size to read. | 153 | * @param: tpm_size, tpm TPM response size to read. |
155 | * @return: should be zero if success else a negative error code. | 154 | * @return: should be zero if success else a negative error code. |
156 | */ | 155 | */ |
157 | static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) | 156 | static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, |
157 | int tpm_size) | ||
158 | { | 158 | { |
159 | u8 data = 0; | 159 | int total_length = 0, ret; |
160 | int total_length = 0, nbr_dummy_bytes, ret; | ||
161 | struct st33zp24_spi_phy *phy = phy_id; | 160 | struct st33zp24_spi_phy *phy = phy_id; |
162 | struct spi_device *dev = phy->spi_device; | 161 | struct spi_device *dev = phy->spi_device; |
163 | u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf; | 162 | struct spi_transfer spi_xfer = { |
164 | u8 *rx_buf = phy->spi_xfer.rx_buf; | 163 | .tx_buf = phy->tx_buf, |
164 | .rx_buf = phy->rx_buf, | ||
165 | }; | ||
165 | 166 | ||
166 | /* Pre-Header */ | 167 | /* Pre-Header */ |
167 | data = LOCALITY0; | 168 | phy->tx_buf[total_length++] = LOCALITY0; |
168 | memcpy(tx_buf + total_length, &data, sizeof(data)); | 169 | phy->tx_buf[total_length++] = tpm_register; |
169 | total_length++; | ||
170 | data = tpm_register; | ||
171 | memcpy(tx_buf + total_length, &data, sizeof(data)); | ||
172 | total_length++; | ||
173 | 170 | ||
174 | nbr_dummy_bytes = phy->latency; | 171 | memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, |
175 | memset(&tx_buf[total_length], TPM_DUMMY_BYTE, | 172 | phy->latency + tpm_size); |
176 | nbr_dummy_bytes + tpm_size); | ||
177 | 173 | ||
178 | phy->spi_xfer.len = total_length + nbr_dummy_bytes + tpm_size; | 174 | spi_xfer.len = total_length + phy->latency + tpm_size; |
179 | 175 | ||
180 | /* header + status byte + size of the data + status byte */ | 176 | /* header + status byte + size of the data + status byte */ |
181 | ret = spi_sync_transfer(dev, &phy->spi_xfer, 1); | 177 | ret = spi_sync_transfer(dev, &spi_xfer, 1); |
182 | if (tpm_size > 0 && ret == 0) { | 178 | if (tpm_size > 0 && ret == 0) { |
183 | ret = rx_buf[total_length + nbr_dummy_bytes - 1]; | 179 | ret = phy->rx_buf[total_length + phy->latency - 1]; |
184 | 180 | ||
185 | memcpy(tpm_data, rx_buf + total_length + nbr_dummy_bytes, | 181 | memcpy(tpm_data, phy->rx_buf + total_length + phy->latency, |
186 | tpm_size); | 182 | tpm_size); |
187 | } | 183 | } |
188 | 184 | ||
189 | return ret; | 185 | return ret; |
190 | } /* read8_reg() */ | 186 | } /* st33zp24_spi_read8_reg() */ |
191 | 187 | ||
192 | /* | 188 | /* |
193 | * st33zp24_spi_recv | 189 | * st33zp24_spi_recv |
@@ -203,13 +199,13 @@ static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, | |||
203 | { | 199 | { |
204 | int ret; | 200 | int ret; |
205 | 201 | ||
206 | ret = read8_reg(phy_id, tpm_register, tpm_data, tpm_size); | 202 | ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size); |
207 | if (!st33zp24_status_to_errno(ret)) | 203 | if (!st33zp24_status_to_errno(ret)) |
208 | return tpm_size; | 204 | return tpm_size; |
209 | return ret; | 205 | return ret; |
210 | } /* st33zp24_spi_recv() */ | 206 | } /* st33zp24_spi_recv() */ |
211 | 207 | ||
212 | static int evaluate_latency(void *phy_id) | 208 | static int st33zp24_spi_evaluate_latency(void *phy_id) |
213 | { | 209 | { |
214 | struct st33zp24_spi_phy *phy = phy_id; | 210 | struct st33zp24_spi_phy *phy = phy_id; |
215 | int latency = 1, status = 0; | 211 | int latency = 1, status = 0; |
@@ -217,9 +213,15 @@ static int evaluate_latency(void *phy_id) | |||
217 | 213 | ||
218 | while (!status && latency < MAX_SPI_LATENCY) { | 214 | while (!status && latency < MAX_SPI_LATENCY) { |
219 | phy->latency = latency; | 215 | phy->latency = latency; |
220 | status = read8_reg(phy_id, TPM_INTF_CAPABILITY, &data, 1); | 216 | status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY, |
217 | &data, 1); | ||
221 | latency++; | 218 | latency++; |
222 | } | 219 | } |
220 | if (status < 0) | ||
221 | return status; | ||
222 | if (latency == MAX_SPI_LATENCY) | ||
223 | return -ENODEV; | ||
224 | |||
223 | return latency - 1; | 225 | return latency - 1; |
224 | } /* evaluate_latency() */ | 226 | } /* evaluate_latency() */ |
225 | 227 | ||
@@ -228,24 +230,52 @@ static const struct st33zp24_phy_ops spi_phy_ops = { | |||
228 | .recv = st33zp24_spi_recv, | 230 | .recv = st33zp24_spi_recv, |
229 | }; | 231 | }; |
230 | 232 | ||
231 | #ifdef CONFIG_OF | 233 | static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev) |
232 | static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) | ||
233 | { | 234 | { |
235 | struct tpm_chip *chip = spi_get_drvdata(spi_dev); | ||
236 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
237 | struct st33zp24_spi_phy *phy = tpm_dev->phy_id; | ||
238 | struct gpio_desc *gpiod_lpcpd; | ||
239 | struct device *dev = &spi_dev->dev; | ||
240 | |||
241 | /* Get LPCPD GPIO from ACPI */ | ||
242 | gpiod_lpcpd = devm_gpiod_get_index(dev, "TPM IO LPCPD", 1, | ||
243 | GPIOD_OUT_HIGH); | ||
244 | if (IS_ERR(gpiod_lpcpd)) { | ||
245 | dev_err(dev, "Failed to retrieve lpcpd-gpios from acpi.\n"); | ||
246 | phy->io_lpcpd = -1; | ||
247 | /* | ||
248 | * lpcpd pin is not specified. This is not an issue as | ||
249 | * power management can be also managed by TPM specific | ||
250 | * commands. So leave with a success status code. | ||
251 | */ | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd); | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static int st33zp24_spi_of_request_resources(struct spi_device *spi_dev) | ||
261 | { | ||
262 | struct tpm_chip *chip = spi_get_drvdata(spi_dev); | ||
263 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
264 | struct st33zp24_spi_phy *phy = tpm_dev->phy_id; | ||
234 | struct device_node *pp; | 265 | struct device_node *pp; |
235 | struct spi_device *dev = phy->spi_device; | ||
236 | int gpio; | 266 | int gpio; |
237 | int ret; | 267 | int ret; |
238 | 268 | ||
239 | pp = dev->dev.of_node; | 269 | pp = spi_dev->dev.of_node; |
240 | if (!pp) { | 270 | if (!pp) { |
241 | dev_err(&dev->dev, "No platform data\n"); | 271 | dev_err(&spi_dev->dev, "No platform data\n"); |
242 | return -ENODEV; | 272 | return -ENODEV; |
243 | } | 273 | } |
244 | 274 | ||
245 | /* Get GPIO from device tree */ | 275 | /* Get GPIO from device tree */ |
246 | gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); | 276 | gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); |
247 | if (gpio < 0) { | 277 | if (gpio < 0) { |
248 | dev_err(&dev->dev, | 278 | dev_err(&spi_dev->dev, |
249 | "Failed to retrieve lpcpd-gpios from dts.\n"); | 279 | "Failed to retrieve lpcpd-gpios from dts.\n"); |
250 | phy->io_lpcpd = -1; | 280 | phy->io_lpcpd = -1; |
251 | /* | 281 | /* |
@@ -256,26 +286,22 @@ static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) | |||
256 | return 0; | 286 | return 0; |
257 | } | 287 | } |
258 | /* GPIO request and configuration */ | 288 | /* GPIO request and configuration */ |
259 | ret = devm_gpio_request_one(&dev->dev, gpio, | 289 | ret = devm_gpio_request_one(&spi_dev->dev, gpio, |
260 | GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD"); | 290 | GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD"); |
261 | if (ret) { | 291 | if (ret) { |
262 | dev_err(&dev->dev, "Failed to request lpcpd pin\n"); | 292 | dev_err(&spi_dev->dev, "Failed to request lpcpd pin\n"); |
263 | return -ENODEV; | 293 | return -ENODEV; |
264 | } | 294 | } |
265 | phy->io_lpcpd = gpio; | 295 | phy->io_lpcpd = gpio; |
266 | 296 | ||
267 | return 0; | 297 | return 0; |
268 | } | 298 | } |
269 | #else | ||
270 | static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) | ||
271 | { | ||
272 | return -ENODEV; | ||
273 | } | ||
274 | #endif | ||
275 | 299 | ||
276 | static int tpm_stm_spi_request_resources(struct spi_device *dev, | 300 | static int st33zp24_spi_request_resources(struct spi_device *dev) |
277 | struct st33zp24_spi_phy *phy) | ||
278 | { | 301 | { |
302 | struct tpm_chip *chip = spi_get_drvdata(dev); | ||
303 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
304 | struct st33zp24_spi_phy *phy = tpm_dev->phy_id; | ||
279 | struct st33zp24_platform_data *pdata; | 305 | struct st33zp24_platform_data *pdata; |
280 | int ret; | 306 | int ret; |
281 | 307 | ||
@@ -303,13 +329,12 @@ static int tpm_stm_spi_request_resources(struct spi_device *dev, | |||
303 | } | 329 | } |
304 | 330 | ||
305 | /* | 331 | /* |
306 | * tpm_st33_spi_probe initialize the TPM device | 332 | * st33zp24_spi_probe initialize the TPM device |
307 | * @param: dev, the spi_device drescription (TPM SPI description). | 333 | * @param: dev, the spi_device drescription (TPM SPI description). |
308 | * @return: 0 in case of success. | 334 | * @return: 0 in case of success. |
309 | * or a negative value describing the error. | 335 | * or a negative value describing the error. |
310 | */ | 336 | */ |
311 | static int | 337 | static int st33zp24_spi_probe(struct spi_device *dev) |
312 | tpm_st33_spi_probe(struct spi_device *dev) | ||
313 | { | 338 | { |
314 | int ret; | 339 | int ret; |
315 | struct st33zp24_platform_data *pdata; | 340 | struct st33zp24_platform_data *pdata; |
@@ -328,21 +353,23 @@ tpm_st33_spi_probe(struct spi_device *dev) | |||
328 | return -ENOMEM; | 353 | return -ENOMEM; |
329 | 354 | ||
330 | phy->spi_device = dev; | 355 | phy->spi_device = dev; |
356 | |||
331 | pdata = dev->dev.platform_data; | 357 | pdata = dev->dev.platform_data; |
332 | if (!pdata && dev->dev.of_node) { | 358 | if (!pdata && dev->dev.of_node) { |
333 | ret = tpm_stm_spi_of_request_resources(phy); | 359 | ret = st33zp24_spi_of_request_resources(dev); |
334 | if (ret) | 360 | if (ret) |
335 | return ret; | 361 | return ret; |
336 | } else if (pdata) { | 362 | } else if (pdata) { |
337 | ret = tpm_stm_spi_request_resources(dev, phy); | 363 | ret = st33zp24_spi_request_resources(dev); |
364 | if (ret) | ||
365 | return ret; | ||
366 | } else if (ACPI_HANDLE(&dev->dev)) { | ||
367 | ret = st33zp24_spi_acpi_request_resources(dev); | ||
338 | if (ret) | 368 | if (ret) |
339 | return ret; | 369 | return ret; |
340 | } | 370 | } |
341 | 371 | ||
342 | phy->spi_xfer.tx_buf = phy->tx_buf; | 372 | phy->latency = st33zp24_spi_evaluate_latency(phy); |
343 | phy->spi_xfer.rx_buf = phy->rx_buf; | ||
344 | |||
345 | phy->latency = evaluate_latency(phy); | ||
346 | if (phy->latency <= 0) | 373 | if (phy->latency <= 0) |
347 | return -ENODEV; | 374 | return -ENODEV; |
348 | 375 | ||
@@ -351,11 +378,11 @@ tpm_st33_spi_probe(struct spi_device *dev) | |||
351 | } | 378 | } |
352 | 379 | ||
353 | /* | 380 | /* |
354 | * tpm_st33_spi_remove remove the TPM device | 381 | * st33zp24_spi_remove remove the TPM device |
355 | * @param: client, the spi_device drescription (TPM SPI description). | 382 | * @param: client, the spi_device drescription (TPM SPI description). |
356 | * @return: 0 in case of success. | 383 | * @return: 0 in case of success. |
357 | */ | 384 | */ |
358 | static int tpm_st33_spi_remove(struct spi_device *dev) | 385 | static int st33zp24_spi_remove(struct spi_device *dev) |
359 | { | 386 | { |
360 | struct tpm_chip *chip = spi_get_drvdata(dev); | 387 | struct tpm_chip *chip = spi_get_drvdata(dev); |
361 | 388 | ||
@@ -368,29 +395,34 @@ static const struct spi_device_id st33zp24_spi_id[] = { | |||
368 | }; | 395 | }; |
369 | MODULE_DEVICE_TABLE(spi, st33zp24_spi_id); | 396 | MODULE_DEVICE_TABLE(spi, st33zp24_spi_id); |
370 | 397 | ||
371 | #ifdef CONFIG_OF | ||
372 | static const struct of_device_id of_st33zp24_spi_match[] = { | 398 | static const struct of_device_id of_st33zp24_spi_match[] = { |
373 | { .compatible = "st,st33zp24-spi", }, | 399 | { .compatible = "st,st33zp24-spi", }, |
374 | {} | 400 | {} |
375 | }; | 401 | }; |
376 | MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match); | 402 | MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match); |
377 | #endif | 403 | |
404 | static const struct acpi_device_id st33zp24_spi_acpi_match[] = { | ||
405 | {"SMO3324"}, | ||
406 | {} | ||
407 | }; | ||
408 | MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match); | ||
378 | 409 | ||
379 | static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend, | 410 | static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend, |
380 | st33zp24_pm_resume); | 411 | st33zp24_pm_resume); |
381 | 412 | ||
382 | static struct spi_driver tpm_st33_spi_driver = { | 413 | static struct spi_driver st33zp24_spi_driver = { |
383 | .driver = { | 414 | .driver = { |
384 | .name = TPM_ST33_SPI, | 415 | .name = TPM_ST33_SPI, |
385 | .pm = &st33zp24_spi_ops, | 416 | .pm = &st33zp24_spi_ops, |
386 | .of_match_table = of_match_ptr(of_st33zp24_spi_match), | 417 | .of_match_table = of_match_ptr(of_st33zp24_spi_match), |
418 | .acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match), | ||
387 | }, | 419 | }, |
388 | .probe = tpm_st33_spi_probe, | 420 | .probe = st33zp24_spi_probe, |
389 | .remove = tpm_st33_spi_remove, | 421 | .remove = st33zp24_spi_remove, |
390 | .id_table = st33zp24_spi_id, | 422 | .id_table = st33zp24_spi_id, |
391 | }; | 423 | }; |
392 | 424 | ||
393 | module_spi_driver(tpm_st33_spi_driver); | 425 | module_spi_driver(st33zp24_spi_driver); |
394 | 426 | ||
395 | MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); | 427 | MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); |
396 | MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver"); | 428 | MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver"); |
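Note on the hunks above: st33zp24_spi_probe() now takes its resources from whichever firmware interface describes the device: device tree, legacy platform data, or ACPI (the new SMO3324 table). A condensed sketch of that dispatch, reusing the helpers introduced above; the wrapper name st33zp24_spi_get_resources is illustrative only and not part of the patch:

static int st33zp24_spi_get_resources(struct spi_device *dev)
{
	/* Device tree is used when present and no platform data was passed. */
	if (!dev->dev.platform_data && dev->dev.of_node)
		return st33zp24_spi_of_request_resources(dev);
	/* Legacy board-file platform data. */
	if (dev->dev.platform_data)
		return st33zp24_spi_request_resources(dev);
	/* ACPI enumeration, matched through st33zp24_spi_acpi_match. */
	if (ACPI_HANDLE(&dev->dev))
		return st33zp24_spi_acpi_request_resources(dev);
	/* Nothing describes the lpcpd pin; the driver carries on without it. */
	return 0;
}

The ordering mirrors st33zp24_spi_probe() above; in the OF and ACPI paths a missing lpcpd GPIO is deliberately not fatal, since power management can fall back to TPM commands.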
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 8d626784cd8d..c2ee30451e41 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * STMicroelectronics TPM Linux driver for TPM ST33ZP24 | 2 | * STMicroelectronics TPM Linux driver for TPM ST33ZP24 |
3 | * Copyright (C) 2009 - 2015 STMicroelectronics | 3 | * Copyright (C) 2009 - 2016 STMicroelectronics |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -73,14 +73,6 @@ enum tis_defaults { | |||
73 | TIS_LONG_TIMEOUT = 2000, | 73 | TIS_LONG_TIMEOUT = 2000, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct st33zp24_dev { | ||
77 | struct tpm_chip *chip; | ||
78 | void *phy_id; | ||
79 | const struct st33zp24_phy_ops *ops; | ||
80 | u32 intrs; | ||
81 | int io_lpcpd; | ||
82 | }; | ||
83 | |||
84 | /* | 76 | /* |
85 | * clear_interruption clear the pending interrupt. | 77 | * clear_interruption clear the pending interrupt. |
86 | * @param: tpm_dev, the tpm device device. | 78 | * @param: tpm_dev, the tpm device device. |
@@ -102,11 +94,9 @@ static u8 clear_interruption(struct st33zp24_dev *tpm_dev) | |||
102 | */ | 94 | */ |
103 | static void st33zp24_cancel(struct tpm_chip *chip) | 95 | static void st33zp24_cancel(struct tpm_chip *chip) |
104 | { | 96 | { |
105 | struct st33zp24_dev *tpm_dev; | 97 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
106 | u8 data; | 98 | u8 data; |
107 | 99 | ||
108 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
109 | |||
110 | data = TPM_STS_COMMAND_READY; | 100 | data = TPM_STS_COMMAND_READY; |
111 | tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); | 101 | tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); |
112 | } /* st33zp24_cancel() */ | 102 | } /* st33zp24_cancel() */ |
@@ -118,11 +108,9 @@ static void st33zp24_cancel(struct tpm_chip *chip) | |||
118 | */ | 108 | */ |
119 | static u8 st33zp24_status(struct tpm_chip *chip) | 109 | static u8 st33zp24_status(struct tpm_chip *chip) |
120 | { | 110 | { |
121 | struct st33zp24_dev *tpm_dev; | 111 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
122 | u8 data; | 112 | u8 data; |
123 | 113 | ||
124 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
125 | |||
126 | tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1); | 114 | tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1); |
127 | return data; | 115 | return data; |
128 | } /* st33zp24_status() */ | 116 | } /* st33zp24_status() */ |
@@ -134,17 +122,15 @@ static u8 st33zp24_status(struct tpm_chip *chip) | |||
134 | */ | 122 | */ |
135 | static int check_locality(struct tpm_chip *chip) | 123 | static int check_locality(struct tpm_chip *chip) |
136 | { | 124 | { |
137 | struct st33zp24_dev *tpm_dev; | 125 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
138 | u8 data; | 126 | u8 data; |
139 | u8 status; | 127 | u8 status; |
140 | 128 | ||
141 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
142 | |||
143 | status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1); | 129 | status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1); |
144 | if (status && (data & | 130 | if (status && (data & |
145 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == | 131 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == |
146 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) | 132 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) |
147 | return chip->vendor.locality; | 133 | return tpm_dev->locality; |
148 | 134 | ||
149 | return -EACCES; | 135 | return -EACCES; |
150 | } /* check_locality() */ | 136 | } /* check_locality() */ |
@@ -156,27 +142,25 @@ static int check_locality(struct tpm_chip *chip) | |||
156 | */ | 142 | */ |
157 | static int request_locality(struct tpm_chip *chip) | 143 | static int request_locality(struct tpm_chip *chip) |
158 | { | 144 | { |
145 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
159 | unsigned long stop; | 146 | unsigned long stop; |
160 | long ret; | 147 | long ret; |
161 | struct st33zp24_dev *tpm_dev; | ||
162 | u8 data; | 148 | u8 data; |
163 | 149 | ||
164 | if (check_locality(chip) == chip->vendor.locality) | 150 | if (check_locality(chip) == tpm_dev->locality) |
165 | return chip->vendor.locality; | 151 | return tpm_dev->locality; |
166 | |||
167 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
168 | 152 | ||
169 | data = TPM_ACCESS_REQUEST_USE; | 153 | data = TPM_ACCESS_REQUEST_USE; |
170 | ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); | 154 | ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); |
171 | if (ret < 0) | 155 | if (ret < 0) |
172 | return ret; | 156 | return ret; |
173 | 157 | ||
174 | stop = jiffies + chip->vendor.timeout_a; | 158 | stop = jiffies + chip->timeout_a; |
175 | 159 | ||
176 | /* Request locality is usually effective after the request */ | 160 | /* Request locality is usually effective after the request */ |
177 | do { | 161 | do { |
178 | if (check_locality(chip) >= 0) | 162 | if (check_locality(chip) >= 0) |
179 | return chip->vendor.locality; | 163 | return tpm_dev->locality; |
180 | msleep(TPM_TIMEOUT); | 164 | msleep(TPM_TIMEOUT); |
181 | } while (time_before(jiffies, stop)); | 165 | } while (time_before(jiffies, stop)); |
182 | 166 | ||
@@ -190,10 +174,9 @@ static int request_locality(struct tpm_chip *chip) | |||
190 | */ | 174 | */ |
191 | static void release_locality(struct tpm_chip *chip) | 175 | static void release_locality(struct tpm_chip *chip) |
192 | { | 176 | { |
193 | struct st33zp24_dev *tpm_dev; | 177 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
194 | u8 data; | 178 | u8 data; |
195 | 179 | ||
196 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
197 | data = TPM_ACCESS_ACTIVE_LOCALITY; | 180 | data = TPM_ACCESS_ACTIVE_LOCALITY; |
198 | 181 | ||
199 | tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); | 182 | tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); |
@@ -206,23 +189,21 @@ static void release_locality(struct tpm_chip *chip) | |||
206 | */ | 189 | */ |
207 | static int get_burstcount(struct tpm_chip *chip) | 190 | static int get_burstcount(struct tpm_chip *chip) |
208 | { | 191 | { |
192 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
209 | unsigned long stop; | 193 | unsigned long stop; |
210 | int burstcnt, status; | 194 | int burstcnt, status; |
211 | u8 tpm_reg, temp; | 195 | u8 temp; |
212 | struct st33zp24_dev *tpm_dev; | ||
213 | |||
214 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
215 | 196 | ||
216 | stop = jiffies + chip->vendor.timeout_d; | 197 | stop = jiffies + chip->timeout_d; |
217 | do { | 198 | do { |
218 | tpm_reg = TPM_STS + 1; | 199 | status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1, |
219 | status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1); | 200 | &temp, 1); |
220 | if (status < 0) | 201 | if (status < 0) |
221 | return -EBUSY; | 202 | return -EBUSY; |
222 | 203 | ||
223 | tpm_reg = TPM_STS + 2; | ||
224 | burstcnt = temp; | 204 | burstcnt = temp; |
225 | status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1); | 205 | status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2, |
206 | &temp, 1); | ||
226 | if (status < 0) | 207 | if (status < 0) |
227 | return -EBUSY; | 208 | return -EBUSY; |
228 | 209 | ||
@@ -271,15 +252,13 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, | |||
271 | static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, | 252 | static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, |
272 | wait_queue_head_t *queue, bool check_cancel) | 253 | wait_queue_head_t *queue, bool check_cancel) |
273 | { | 254 | { |
255 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
274 | unsigned long stop; | 256 | unsigned long stop; |
275 | int ret = 0; | 257 | int ret = 0; |
276 | bool canceled = false; | 258 | bool canceled = false; |
277 | bool condition; | 259 | bool condition; |
278 | u32 cur_intrs; | 260 | u32 cur_intrs; |
279 | u8 status; | 261 | u8 status; |
280 | struct st33zp24_dev *tpm_dev; | ||
281 | |||
282 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
283 | 262 | ||
284 | /* check current status */ | 263 | /* check current status */ |
285 | status = st33zp24_status(chip); | 264 | status = st33zp24_status(chip); |
@@ -288,10 +267,10 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, | |||
288 | 267 | ||
289 | stop = jiffies + timeout; | 268 | stop = jiffies + timeout; |
290 | 269 | ||
291 | if (chip->vendor.irq) { | 270 | if (chip->flags & TPM_CHIP_FLAG_IRQ) { |
292 | cur_intrs = tpm_dev->intrs; | 271 | cur_intrs = tpm_dev->intrs; |
293 | clear_interruption(tpm_dev); | 272 | clear_interruption(tpm_dev); |
294 | enable_irq(chip->vendor.irq); | 273 | enable_irq(tpm_dev->irq); |
295 | 274 | ||
296 | do { | 275 | do { |
297 | if (ret == -ERESTARTSYS && freezing(current)) | 276 | if (ret == -ERESTARTSYS && freezing(current)) |
@@ -314,7 +293,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, | |||
314 | } | 293 | } |
315 | } while (ret == -ERESTARTSYS && freezing(current)); | 294 | } while (ret == -ERESTARTSYS && freezing(current)); |
316 | 295 | ||
317 | disable_irq_nosync(chip->vendor.irq); | 296 | disable_irq_nosync(tpm_dev->irq); |
318 | 297 | ||
319 | } else { | 298 | } else { |
320 | do { | 299 | do { |
@@ -337,16 +316,14 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, | |||
337 | */ | 316 | */ |
338 | static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | 317 | static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) |
339 | { | 318 | { |
319 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
340 | int size = 0, burstcnt, len, ret; | 320 | int size = 0, burstcnt, len, ret; |
341 | struct st33zp24_dev *tpm_dev; | ||
342 | |||
343 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
344 | 321 | ||
345 | while (size < count && | 322 | while (size < count && |
346 | wait_for_stat(chip, | 323 | wait_for_stat(chip, |
347 | TPM_STS_DATA_AVAIL | TPM_STS_VALID, | 324 | TPM_STS_DATA_AVAIL | TPM_STS_VALID, |
348 | chip->vendor.timeout_c, | 325 | chip->timeout_c, |
349 | &chip->vendor.read_queue, true) == 0) { | 326 | &tpm_dev->read_queue, true) == 0) { |
350 | burstcnt = get_burstcount(chip); | 327 | burstcnt = get_burstcount(chip); |
351 | if (burstcnt < 0) | 328 | if (burstcnt < 0) |
352 | return burstcnt; | 329 | return burstcnt; |
@@ -370,13 +347,11 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | |||
370 | static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) | 347 | static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) |
371 | { | 348 | { |
372 | struct tpm_chip *chip = dev_id; | 349 | struct tpm_chip *chip = dev_id; |
373 | struct st33zp24_dev *tpm_dev; | 350 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
374 | |||
375 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
376 | 351 | ||
377 | tpm_dev->intrs++; | 352 | tpm_dev->intrs++; |
378 | wake_up_interruptible(&chip->vendor.read_queue); | 353 | wake_up_interruptible(&tpm_dev->read_queue); |
379 | disable_irq_nosync(chip->vendor.irq); | 354 | disable_irq_nosync(tpm_dev->irq); |
380 | 355 | ||
381 | return IRQ_HANDLED; | 356 | return IRQ_HANDLED; |
382 | } /* tpm_ioserirq_handler() */ | 357 | } /* tpm_ioserirq_handler() */ |
@@ -393,19 +368,17 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) | |||
393 | static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, | 368 | static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, |
394 | size_t len) | 369 | size_t len) |
395 | { | 370 | { |
371 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); | ||
396 | u32 status, i, size, ordinal; | 372 | u32 status, i, size, ordinal; |
397 | int burstcnt = 0; | 373 | int burstcnt = 0; |
398 | int ret; | 374 | int ret; |
399 | u8 data; | 375 | u8 data; |
400 | struct st33zp24_dev *tpm_dev; | ||
401 | 376 | ||
402 | if (!chip) | 377 | if (!chip) |
403 | return -EBUSY; | 378 | return -EBUSY; |
404 | if (len < TPM_HEADER_SIZE) | 379 | if (len < TPM_HEADER_SIZE) |
405 | return -EBUSY; | 380 | return -EBUSY; |
406 | 381 | ||
407 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
408 | |||
409 | ret = request_locality(chip); | 382 | ret = request_locality(chip); |
410 | if (ret < 0) | 383 | if (ret < 0) |
411 | return ret; | 384 | return ret; |
@@ -414,8 +387,8 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, | |||
414 | if ((status & TPM_STS_COMMAND_READY) == 0) { | 387 | if ((status & TPM_STS_COMMAND_READY) == 0) { |
415 | st33zp24_cancel(chip); | 388 | st33zp24_cancel(chip); |
416 | if (wait_for_stat | 389 | if (wait_for_stat |
417 | (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, | 390 | (chip, TPM_STS_COMMAND_READY, chip->timeout_b, |
418 | &chip->vendor.read_queue, false) < 0) { | 391 | &tpm_dev->read_queue, false) < 0) { |
419 | ret = -ETIME; | 392 | ret = -ETIME; |
420 | goto out_err; | 393 | goto out_err; |
421 | } | 394 | } |
@@ -456,12 +429,12 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, | |||
456 | if (ret < 0) | 429 | if (ret < 0) |
457 | goto out_err; | 430 | goto out_err; |
458 | 431 | ||
459 | if (chip->vendor.irq) { | 432 | if (chip->flags & TPM_CHIP_FLAG_IRQ) { |
460 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); | 433 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); |
461 | 434 | ||
462 | ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, | 435 | ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, |
463 | tpm_calc_ordinal_duration(chip, ordinal), | 436 | tpm_calc_ordinal_duration(chip, ordinal), |
464 | &chip->vendor.read_queue, false); | 437 | &tpm_dev->read_queue, false); |
465 | if (ret < 0) | 438 | if (ret < 0) |
466 | goto out_err; | 439 | goto out_err; |
467 | } | 440 | } |
@@ -532,6 +505,7 @@ static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status) | |||
532 | } | 505 | } |
533 | 506 | ||
534 | static const struct tpm_class_ops st33zp24_tpm = { | 507 | static const struct tpm_class_ops st33zp24_tpm = { |
508 | .flags = TPM_OPS_AUTO_STARTUP, | ||
535 | .send = st33zp24_send, | 509 | .send = st33zp24_send, |
536 | .recv = st33zp24_recv, | 510 | .recv = st33zp24_recv, |
537 | .cancel = st33zp24_cancel, | 511 | .cancel = st33zp24_cancel, |
@@ -565,20 +539,20 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, | |||
565 | if (!tpm_dev) | 539 | if (!tpm_dev) |
566 | return -ENOMEM; | 540 | return -ENOMEM; |
567 | 541 | ||
568 | TPM_VPRIV(chip) = tpm_dev; | ||
569 | tpm_dev->phy_id = phy_id; | 542 | tpm_dev->phy_id = phy_id; |
570 | tpm_dev->ops = ops; | 543 | tpm_dev->ops = ops; |
544 | dev_set_drvdata(&chip->dev, tpm_dev); | ||
571 | 545 | ||
572 | chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | 546 | chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); |
573 | chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); | 547 | chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); |
574 | chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | 548 | chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); |
575 | chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | 549 | chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); |
576 | 550 | ||
577 | chip->vendor.locality = LOCALITY0; | 551 | tpm_dev->locality = LOCALITY0; |
578 | 552 | ||
579 | if (irq) { | 553 | if (irq) { |
580 | /* INTERRUPT Setup */ | 554 | /* INTERRUPT Setup */ |
581 | init_waitqueue_head(&chip->vendor.read_queue); | 555 | init_waitqueue_head(&tpm_dev->read_queue); |
582 | tpm_dev->intrs = 0; | 556 | tpm_dev->intrs = 0; |
583 | 557 | ||
584 | if (request_locality(chip) != LOCALITY0) { | 558 | if (request_locality(chip) != LOCALITY0) { |
@@ -611,16 +585,14 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, | |||
611 | if (ret < 0) | 585 | if (ret < 0) |
612 | goto _tpm_clean_answer; | 586 | goto _tpm_clean_answer; |
613 | 587 | ||
614 | chip->vendor.irq = irq; | 588 | tpm_dev->irq = irq; |
589 | chip->flags |= TPM_CHIP_FLAG_IRQ; | ||
615 | 590 | ||
616 | disable_irq_nosync(chip->vendor.irq); | 591 | disable_irq_nosync(tpm_dev->irq); |
617 | 592 | ||
618 | tpm_gen_interrupt(chip); | 593 | tpm_gen_interrupt(chip); |
619 | } | 594 | } |
620 | 595 | ||
621 | tpm_get_timeouts(chip); | ||
622 | tpm_do_selftest(chip); | ||
623 | |||
624 | return tpm_chip_register(chip); | 596 | return tpm_chip_register(chip); |
625 | _tpm_clean_answer: | 597 | _tpm_clean_answer: |
626 | dev_info(&chip->dev, "TPM initialization fail\n"); | 598 | dev_info(&chip->dev, "TPM initialization fail\n"); |
@@ -650,10 +622,9 @@ EXPORT_SYMBOL(st33zp24_remove); | |||
650 | int st33zp24_pm_suspend(struct device *dev) | 622 | int st33zp24_pm_suspend(struct device *dev) |
651 | { | 623 | { |
652 | struct tpm_chip *chip = dev_get_drvdata(dev); | 624 | struct tpm_chip *chip = dev_get_drvdata(dev); |
653 | struct st33zp24_dev *tpm_dev; | 625 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
654 | int ret = 0; | ||
655 | 626 | ||
656 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | 627 | int ret = 0; |
657 | 628 | ||
658 | if (gpio_is_valid(tpm_dev->io_lpcpd)) | 629 | if (gpio_is_valid(tpm_dev->io_lpcpd)) |
659 | gpio_set_value(tpm_dev->io_lpcpd, 0); | 630 | gpio_set_value(tpm_dev->io_lpcpd, 0); |
@@ -672,16 +643,14 @@ EXPORT_SYMBOL(st33zp24_pm_suspend); | |||
672 | int st33zp24_pm_resume(struct device *dev) | 643 | int st33zp24_pm_resume(struct device *dev) |
673 | { | 644 | { |
674 | struct tpm_chip *chip = dev_get_drvdata(dev); | 645 | struct tpm_chip *chip = dev_get_drvdata(dev); |
675 | struct st33zp24_dev *tpm_dev; | 646 | struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); |
676 | int ret = 0; | 647 | int ret = 0; |
677 | 648 | ||
678 | tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); | ||
679 | |||
680 | if (gpio_is_valid(tpm_dev->io_lpcpd)) { | 649 | if (gpio_is_valid(tpm_dev->io_lpcpd)) { |
681 | gpio_set_value(tpm_dev->io_lpcpd, 1); | 650 | gpio_set_value(tpm_dev->io_lpcpd, 1); |
682 | ret = wait_for_stat(chip, | 651 | ret = wait_for_stat(chip, |
683 | TPM_STS_VALID, chip->vendor.timeout_b, | 652 | TPM_STS_VALID, chip->timeout_b, |
684 | &chip->vendor.read_queue, false); | 653 | &tpm_dev->read_queue, false); |
685 | } else { | 654 | } else { |
686 | ret = tpm_pm_resume(dev); | 655 | ret = tpm_pm_resume(dev); |
687 | if (!ret) | 656 | if (!ret) |
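The st33zp24.c rework above replaces the removed TPM_VPRIV() accessor and the chip->vendor fields with driver-private state reached through the chip device's drvdata. Every callback now opens with the same lookup; a minimal sketch of the pattern, with a hypothetical callback name:

static u8 example_read_status(struct tpm_chip *chip)
{
	/* st33zp24_probe() attached the private state with
	 * dev_set_drvdata(&chip->dev, tpm_dev); fetch it back here.
	 */
	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
	u8 data;

	tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
	return data;
}

Timeouts move the same way (chip->vendor.timeout_a..d become chip->timeout_a..d), while the irq, locality and read_queue now live in struct st33zp24_dev, as the header change below shows.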
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h index c207cebf67dd..6f4a4198af6a 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.h +++ b/drivers/char/tpm/st33zp24/st33zp24.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * STMicroelectronics TPM Linux driver for TPM ST33ZP24 | 2 | * STMicroelectronics TPM Linux driver for TPM ST33ZP24 |
3 | * Copyright (C) 2009 - 2015 STMicroelectronics | 3 | * Copyright (C) 2009 - 2016 STMicroelectronics |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms and conditions of the GNU General Public License, | 6 | * under the terms and conditions of the GNU General Public License, |
@@ -21,6 +21,18 @@ | |||
21 | #define TPM_WRITE_DIRECTION 0x80 | 21 | #define TPM_WRITE_DIRECTION 0x80 |
22 | #define TPM_BUFSIZE 2048 | 22 | #define TPM_BUFSIZE 2048 |
23 | 23 | ||
24 | struct st33zp24_dev { | ||
25 | struct tpm_chip *chip; | ||
26 | void *phy_id; | ||
27 | const struct st33zp24_phy_ops *ops; | ||
28 | int locality; | ||
29 | int irq; | ||
30 | u32 intrs; | ||
31 | int io_lpcpd; | ||
32 | wait_queue_head_t read_queue; | ||
33 | }; | ||
34 | |||
35 | |||
24 | struct st33zp24_phy_ops { | 36 | struct st33zp24_phy_ops { |
25 | int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); | 37 | int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); |
26 | int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); | 38 | int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); |
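With struct st33zp24_dev now exposed in the header, a bus glue layer only has to implement the two struct st33zp24_phy_ops hooks and hand them to the common core, as the SPI phy above does with spi_phy_ops. A bare-bones sketch; the names and bodies below are placeholders, not part of the patch:

static int example_phy_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
			    int tpm_size)
{
	/* Write tpm_size bytes to tpm_register over the bus. */
	return tpm_size;
}

static int example_phy_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
			    int tpm_size)
{
	/* Read tpm_size bytes from tpm_register over the bus. */
	return tpm_size;
}

static const struct st33zp24_phy_ops example_phy_ops = {
	.send = example_phy_send,
	.recv = example_phy_recv,
};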
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 274dd0123237..e5950131bd90 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c | |||
@@ -29,33 +29,88 @@ | |||
29 | #include "tpm.h" | 29 | #include "tpm.h" |
30 | #include "tpm_eventlog.h" | 30 | #include "tpm_eventlog.h" |
31 | 31 | ||
32 | static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); | 32 | DEFINE_IDR(dev_nums_idr); |
33 | static LIST_HEAD(tpm_chip_list); | 33 | static DEFINE_MUTEX(idr_lock); |
34 | static DEFINE_SPINLOCK(driver_lock); | ||
35 | 34 | ||
36 | struct class *tpm_class; | 35 | struct class *tpm_class; |
37 | dev_t tpm_devt; | 36 | dev_t tpm_devt; |
38 | 37 | ||
39 | /* | 38 | /** |
40 | * tpm_chip_find_get - return tpm_chip for a given chip number | 39 | * tpm_try_get_ops() - Get a ref to the tpm_chip |
41 | * @chip_num the device number for the chip | 40 | * @chip: Chip to ref |
41 | * | ||
42 | * The caller must already have some kind of locking to ensure that chip is | ||
43 | * valid. This function will lock the chip so that the ops member can be | ||
44 | * accessed safely. The locking prevents tpm_chip_unregister from | ||
45 | * completing, so it should not be held for long periods. | ||
46 | * | ||
47 | * Returns -ERRNO if the chip could not be got. | ||
42 | */ | 48 | */ |
43 | struct tpm_chip *tpm_chip_find_get(int chip_num) | 49 | int tpm_try_get_ops(struct tpm_chip *chip) |
44 | { | 50 | { |
45 | struct tpm_chip *pos, *chip = NULL; | 51 | int rc = -EIO; |
46 | 52 | ||
47 | rcu_read_lock(); | 53 | get_device(&chip->dev); |
48 | list_for_each_entry_rcu(pos, &tpm_chip_list, list) { | ||
49 | if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) | ||
50 | continue; | ||
51 | 54 | ||
52 | if (try_module_get(pos->pdev->driver->owner)) { | 55 | down_read(&chip->ops_sem); |
53 | chip = pos; | 56 | if (!chip->ops) |
54 | break; | 57 | goto out_lock; |
55 | } | 58 | |
59 | return 0; | ||
60 | out_lock: | ||
61 | up_read(&chip->ops_sem); | ||
62 | put_device(&chip->dev); | ||
63 | return rc; | ||
64 | } | ||
65 | EXPORT_SYMBOL_GPL(tpm_try_get_ops); | ||
66 | |||
67 | /** | ||
68 | * tpm_put_ops() - Release a ref to the tpm_chip | ||
69 | * @chip: Chip to put | ||
70 | * | ||
71 | * This is the opposite pair to tpm_try_get_ops(). After this returns chip may | ||
72 | * be kfree'd. | ||
73 | */ | ||
74 | void tpm_put_ops(struct tpm_chip *chip) | ||
75 | { | ||
76 | up_read(&chip->ops_sem); | ||
77 | put_device(&chip->dev); | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(tpm_put_ops); | ||
80 | |||
81 | /** | ||
82 | * tpm_chip_find_get() - return tpm_chip for a given chip number | ||
83 | * @chip_num: id to find | ||
84 | * | ||
85 | * The return'd chip has been tpm_try_get_ops'd and must be released via | ||
86 | * tpm_put_ops | ||
87 | */ | ||
88 | struct tpm_chip *tpm_chip_find_get(int chip_num) | ||
89 | { | ||
90 | struct tpm_chip *chip, *res = NULL; | ||
91 | int chip_prev; | ||
92 | |||
93 | mutex_lock(&idr_lock); | ||
94 | |||
95 | if (chip_num == TPM_ANY_NUM) { | ||
96 | chip_num = 0; | ||
97 | do { | ||
98 | chip_prev = chip_num; | ||
99 | chip = idr_get_next(&dev_nums_idr, &chip_num); | ||
100 | if (chip && !tpm_try_get_ops(chip)) { | ||
101 | res = chip; | ||
102 | break; | ||
103 | } | ||
104 | } while (chip_prev != chip_num); | ||
105 | } else { | ||
106 | chip = idr_find_slowpath(&dev_nums_idr, chip_num); | ||
107 | if (chip && !tpm_try_get_ops(chip)) | ||
108 | res = chip; | ||
56 | } | 109 | } |
57 | rcu_read_unlock(); | 110 | |
58 | return chip; | 111 | mutex_unlock(&idr_lock); |
112 | |||
113 | return res; | ||
59 | } | 114 | } |
60 | 115 | ||
61 | /** | 116 | /** |
@@ -68,24 +123,25 @@ static void tpm_dev_release(struct device *dev) | |||
68 | { | 123 | { |
69 | struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); | 124 | struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); |
70 | 125 | ||
71 | spin_lock(&driver_lock); | 126 | mutex_lock(&idr_lock); |
72 | clear_bit(chip->dev_num, dev_mask); | 127 | idr_remove(&dev_nums_idr, chip->dev_num); |
73 | spin_unlock(&driver_lock); | 128 | mutex_unlock(&idr_lock); |
129 | |||
74 | kfree(chip); | 130 | kfree(chip); |
75 | } | 131 | } |
76 | 132 | ||
77 | /** | 133 | /** |
78 | * tpmm_chip_alloc() - allocate a new struct tpm_chip instance | 134 | * tpm_chip_alloc() - allocate a new struct tpm_chip instance |
79 | * @dev: device to which the chip is associated | 135 | * @pdev: device to which the chip is associated |
136 | * At this point pdev mst be initialized, but does not have to | ||
137 | * be registered | ||
80 | * @ops: struct tpm_class_ops instance | 138 | * @ops: struct tpm_class_ops instance |
81 | * | 139 | * |
82 | * Allocates a new struct tpm_chip instance and assigns a free | 140 | * Allocates a new struct tpm_chip instance and assigns a free |
83 | * device number for it. Caller does not have to worry about | 141 | * device number for it. Must be paired with put_device(&chip->dev). |
84 | * freeing the allocated resources. When the devices is removed | ||
85 | * devres calls tpmm_chip_remove() to do the job. | ||
86 | */ | 142 | */ |
87 | struct tpm_chip *tpmm_chip_alloc(struct device *dev, | 143 | struct tpm_chip *tpm_chip_alloc(struct device *dev, |
88 | const struct tpm_class_ops *ops) | 144 | const struct tpm_class_ops *ops) |
89 | { | 145 | { |
90 | struct tpm_chip *chip; | 146 | struct tpm_chip *chip; |
91 | int rc; | 147 | int rc; |
@@ -95,53 +151,75 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, | |||
95 | return ERR_PTR(-ENOMEM); | 151 | return ERR_PTR(-ENOMEM); |
96 | 152 | ||
97 | mutex_init(&chip->tpm_mutex); | 153 | mutex_init(&chip->tpm_mutex); |
98 | INIT_LIST_HEAD(&chip->list); | 154 | init_rwsem(&chip->ops_sem); |
99 | 155 | ||
100 | chip->ops = ops; | 156 | chip->ops = ops; |
101 | 157 | ||
102 | spin_lock(&driver_lock); | 158 | mutex_lock(&idr_lock); |
103 | chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES); | 159 | rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL); |
104 | spin_unlock(&driver_lock); | 160 | mutex_unlock(&idr_lock); |
105 | 161 | if (rc < 0) { | |
106 | if (chip->dev_num >= TPM_NUM_DEVICES) { | ||
107 | dev_err(dev, "No available tpm device numbers\n"); | 162 | dev_err(dev, "No available tpm device numbers\n"); |
108 | kfree(chip); | 163 | kfree(chip); |
109 | return ERR_PTR(-ENOMEM); | 164 | return ERR_PTR(rc); |
110 | } | 165 | } |
166 | chip->dev_num = rc; | ||
111 | 167 | ||
112 | set_bit(chip->dev_num, dev_mask); | 168 | device_initialize(&chip->dev); |
113 | |||
114 | scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); | ||
115 | |||
116 | chip->pdev = dev; | ||
117 | |||
118 | dev_set_drvdata(dev, chip); | ||
119 | 169 | ||
120 | chip->dev.class = tpm_class; | 170 | chip->dev.class = tpm_class; |
121 | chip->dev.release = tpm_dev_release; | 171 | chip->dev.release = tpm_dev_release; |
122 | chip->dev.parent = chip->pdev; | 172 | chip->dev.parent = dev; |
123 | #ifdef CONFIG_ACPI | ||
124 | chip->dev.groups = chip->groups; | 173 | chip->dev.groups = chip->groups; |
125 | #endif | ||
126 | 174 | ||
127 | if (chip->dev_num == 0) | 175 | if (chip->dev_num == 0) |
128 | chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); | 176 | chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); |
129 | else | 177 | else |
130 | chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); | 178 | chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); |
131 | 179 | ||
132 | dev_set_name(&chip->dev, "%s", chip->devname); | 180 | rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); |
181 | if (rc) | ||
182 | goto out; | ||
133 | 183 | ||
134 | device_initialize(&chip->dev); | 184 | if (!dev) |
185 | chip->flags |= TPM_CHIP_FLAG_VIRTUAL; | ||
135 | 186 | ||
136 | cdev_init(&chip->cdev, &tpm_fops); | 187 | cdev_init(&chip->cdev, &tpm_fops); |
137 | chip->cdev.owner = chip->pdev->driver->owner; | 188 | chip->cdev.owner = THIS_MODULE; |
138 | chip->cdev.kobj.parent = &chip->dev.kobj; | 189 | chip->cdev.kobj.parent = &chip->dev.kobj; |
139 | 190 | ||
140 | rc = devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev); | 191 | return chip; |
141 | if (rc) { | 192 | |
142 | put_device(&chip->dev); | 193 | out: |
194 | put_device(&chip->dev); | ||
195 | return ERR_PTR(rc); | ||
196 | } | ||
197 | EXPORT_SYMBOL_GPL(tpm_chip_alloc); | ||
198 | |||
199 | /** | ||
200 | * tpmm_chip_alloc() - allocate a new struct tpm_chip instance | ||
201 | * @pdev: parent device to which the chip is associated | ||
202 | * @ops: struct tpm_class_ops instance | ||
203 | * | ||
204 | * Same as tpm_chip_alloc except devm is used to do the put_device | ||
205 | */ | ||
206 | struct tpm_chip *tpmm_chip_alloc(struct device *pdev, | ||
207 | const struct tpm_class_ops *ops) | ||
208 | { | ||
209 | struct tpm_chip *chip; | ||
210 | int rc; | ||
211 | |||
212 | chip = tpm_chip_alloc(pdev, ops); | ||
213 | if (IS_ERR(chip)) | ||
214 | return chip; | ||
215 | |||
216 | rc = devm_add_action_or_reset(pdev, | ||
217 | (void (*)(void *)) put_device, | ||
218 | &chip->dev); | ||
219 | if (rc) | ||
143 | return ERR_PTR(rc); | 220 | return ERR_PTR(rc); |
144 | } | 221 | |
222 | dev_set_drvdata(pdev, chip); | ||
145 | 223 | ||
146 | return chip; | 224 | return chip; |
147 | } | 225 | } |
@@ -155,7 +233,7 @@ static int tpm_add_char_device(struct tpm_chip *chip) | |||
155 | if (rc) { | 233 | if (rc) { |
156 | dev_err(&chip->dev, | 234 | dev_err(&chip->dev, |
157 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", | 235 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", |
158 | chip->devname, MAJOR(chip->dev.devt), | 236 | dev_name(&chip->dev), MAJOR(chip->dev.devt), |
159 | MINOR(chip->dev.devt), rc); | 237 | MINOR(chip->dev.devt), rc); |
160 | 238 | ||
161 | return rc; | 239 | return rc; |
@@ -165,13 +243,18 @@ static int tpm_add_char_device(struct tpm_chip *chip) | |||
165 | if (rc) { | 243 | if (rc) { |
166 | dev_err(&chip->dev, | 244 | dev_err(&chip->dev, |
167 | "unable to device_register() %s, major %d, minor %d, err=%d\n", | 245 | "unable to device_register() %s, major %d, minor %d, err=%d\n", |
168 | chip->devname, MAJOR(chip->dev.devt), | 246 | dev_name(&chip->dev), MAJOR(chip->dev.devt), |
169 | MINOR(chip->dev.devt), rc); | 247 | MINOR(chip->dev.devt), rc); |
170 | 248 | ||
171 | cdev_del(&chip->cdev); | 249 | cdev_del(&chip->cdev); |
172 | return rc; | 250 | return rc; |
173 | } | 251 | } |
174 | 252 | ||
253 | /* Make the chip available. */ | ||
254 | mutex_lock(&idr_lock); | ||
255 | idr_replace(&dev_nums_idr, chip, chip->dev_num); | ||
256 | mutex_unlock(&idr_lock); | ||
257 | |||
175 | return rc; | 258 | return rc; |
176 | } | 259 | } |
177 | 260 | ||
@@ -179,20 +262,28 @@ static void tpm_del_char_device(struct tpm_chip *chip) | |||
179 | { | 262 | { |
180 | cdev_del(&chip->cdev); | 263 | cdev_del(&chip->cdev); |
181 | device_del(&chip->dev); | 264 | device_del(&chip->dev); |
265 | |||
266 | /* Make the chip unavailable. */ | ||
267 | mutex_lock(&idr_lock); | ||
268 | idr_replace(&dev_nums_idr, NULL, chip->dev_num); | ||
269 | mutex_unlock(&idr_lock); | ||
270 | |||
271 | /* Make the driver uncallable. */ | ||
272 | down_write(&chip->ops_sem); | ||
273 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
274 | tpm2_shutdown(chip, TPM2_SU_CLEAR); | ||
275 | chip->ops = NULL; | ||
276 | up_write(&chip->ops_sem); | ||
182 | } | 277 | } |
183 | 278 | ||
184 | static int tpm1_chip_register(struct tpm_chip *chip) | 279 | static int tpm1_chip_register(struct tpm_chip *chip) |
185 | { | 280 | { |
186 | int rc; | ||
187 | |||
188 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | 281 | if (chip->flags & TPM_CHIP_FLAG_TPM2) |
189 | return 0; | 282 | return 0; |
190 | 283 | ||
191 | rc = tpm_sysfs_add_device(chip); | 284 | tpm_sysfs_add_device(chip); |
192 | if (rc) | ||
193 | return rc; | ||
194 | 285 | ||
195 | chip->bios_dir = tpm_bios_log_setup(chip->devname); | 286 | chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev)); |
196 | 287 | ||
197 | return 0; | 288 | return 0; |
198 | } | 289 | } |
@@ -204,10 +295,50 @@ static void tpm1_chip_unregister(struct tpm_chip *chip) | |||
204 | 295 | ||
205 | if (chip->bios_dir) | 296 | if (chip->bios_dir) |
206 | tpm_bios_log_teardown(chip->bios_dir); | 297 | tpm_bios_log_teardown(chip->bios_dir); |
298 | } | ||
299 | |||
300 | static void tpm_del_legacy_sysfs(struct tpm_chip *chip) | ||
301 | { | ||
302 | struct attribute **i; | ||
303 | |||
304 | if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) | ||
305 | return; | ||
306 | |||
307 | sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); | ||
207 | 308 | ||
208 | tpm_sysfs_del_device(chip); | 309 | for (i = chip->groups[0]->attrs; *i != NULL; ++i) |
310 | sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name); | ||
209 | } | 311 | } |
210 | 312 | ||
313 | /* For compatibility with legacy sysfs paths we provide symlinks from the | ||
314 | * parent dev directory to selected names within the tpm chip directory. Old | ||
315 | * kernel versions created these files directly under the parent. | ||
316 | */ | ||
317 | static int tpm_add_legacy_sysfs(struct tpm_chip *chip) | ||
318 | { | ||
319 | struct attribute **i; | ||
320 | int rc; | ||
321 | |||
322 | if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) | ||
323 | return 0; | ||
324 | |||
325 | rc = __compat_only_sysfs_link_entry_to_kobj( | ||
326 | &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); | ||
327 | if (rc && rc != -ENOENT) | ||
328 | return rc; | ||
329 | |||
330 | /* All the names from tpm-sysfs */ | ||
331 | for (i = chip->groups[0]->attrs; *i != NULL; ++i) { | ||
332 | rc = __compat_only_sysfs_link_entry_to_kobj( | ||
333 | &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name); | ||
334 | if (rc) { | ||
335 | tpm_del_legacy_sysfs(chip); | ||
336 | return rc; | ||
337 | } | ||
338 | } | ||
339 | |||
340 | return 0; | ||
341 | } | ||
211 | /* | 342 | /* |
212 | * tpm_chip_register() - create a character device for the TPM chip | 343 | * tpm_chip_register() - create a character device for the TPM chip |
213 | * @chip: TPM chip to use. | 344 | * @chip: TPM chip to use. |
@@ -223,6 +354,15 @@ int tpm_chip_register(struct tpm_chip *chip) | |||
223 | { | 354 | { |
224 | int rc; | 355 | int rc; |
225 | 356 | ||
357 | if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) { | ||
358 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
359 | rc = tpm2_auto_startup(chip); | ||
360 | else | ||
361 | rc = tpm1_auto_startup(chip); | ||
362 | if (rc) | ||
363 | return rc; | ||
364 | } | ||
365 | |||
226 | rc = tpm1_chip_register(chip); | 366 | rc = tpm1_chip_register(chip); |
227 | if (rc) | 367 | if (rc) |
228 | return rc; | 368 | return rc; |
@@ -230,30 +370,20 @@ int tpm_chip_register(struct tpm_chip *chip) | |||
230 | tpm_add_ppi(chip); | 370 | tpm_add_ppi(chip); |
231 | 371 | ||
232 | rc = tpm_add_char_device(chip); | 372 | rc = tpm_add_char_device(chip); |
233 | if (rc) | 373 | if (rc) { |
234 | goto out_err; | 374 | tpm1_chip_unregister(chip); |
235 | 375 | return rc; | |
236 | /* Make the chip available. */ | 376 | } |
237 | spin_lock(&driver_lock); | ||
238 | list_add_tail_rcu(&chip->list, &tpm_chip_list); | ||
239 | spin_unlock(&driver_lock); | ||
240 | 377 | ||
241 | chip->flags |= TPM_CHIP_FLAG_REGISTERED; | 378 | chip->flags |= TPM_CHIP_FLAG_REGISTERED; |
242 | 379 | ||
243 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { | 380 | rc = tpm_add_legacy_sysfs(chip); |
244 | rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj, | 381 | if (rc) { |
245 | &chip->dev.kobj, | 382 | tpm_chip_unregister(chip); |
246 | "ppi"); | 383 | return rc; |
247 | if (rc && rc != -ENOENT) { | ||
248 | tpm_chip_unregister(chip); | ||
249 | return rc; | ||
250 | } | ||
251 | } | 384 | } |
252 | 385 | ||
253 | return 0; | 386 | return 0; |
254 | out_err: | ||
255 | tpm1_chip_unregister(chip); | ||
256 | return rc; | ||
257 | } | 387 | } |
258 | EXPORT_SYMBOL_GPL(tpm_chip_register); | 388 | EXPORT_SYMBOL_GPL(tpm_chip_register); |
259 | 389 | ||
@@ -264,6 +394,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); | |||
264 | * Takes the chip first away from the list of available TPM chips and then | 394 | * Takes the chip first away from the list of available TPM chips and then |
265 | * cleans up all the resources reserved by tpm_chip_register(). | 395 | * cleans up all the resources reserved by tpm_chip_register(). |
266 | * | 396 | * |
397 | * Once this function returns the driver call backs in 'op's will not be | ||
398 | * running and will no longer start. | ||
399 | * | ||
267 | * NOTE: This function should be only called before deinitializing chip | 400 | * NOTE: This function should be only called before deinitializing chip |
268 | * resources. | 401 | * resources. |
269 | */ | 402 | */ |
@@ -272,13 +405,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) | |||
272 | if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED)) | 405 | if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED)) |
273 | return; | 406 | return; |
274 | 407 | ||
275 | spin_lock(&driver_lock); | 408 | tpm_del_legacy_sysfs(chip); |
276 | list_del_rcu(&chip->list); | ||
277 | spin_unlock(&driver_lock); | ||
278 | synchronize_rcu(); | ||
279 | |||
280 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) | ||
281 | sysfs_remove_link(&chip->pdev->kobj, "ppi"); | ||
282 | 409 | ||
283 | tpm1_chip_unregister(chip); | 410 | tpm1_chip_unregister(chip); |
284 | tpm_del_char_device(chip); | 411 | tpm_del_char_device(chip); |
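tpm-chip.c above drops the RCU chip list in favor of an IDR and introduces the tpm_try_get_ops()/tpm_put_ops() pair; tpm_chip_find_get() now returns a chip that is already referenced and ops-locked. A sketch of the intended use by an in-kernel caller (the function itself is hypothetical):

static int example_use_first_tpm(void)
{
	struct tpm_chip *chip;

	chip = tpm_chip_find_get(TPM_ANY_NUM);
	if (!chip)
		return -ENODEV;

	/* chip->ops stays valid until tpm_put_ops(); keep this region
	 * short so tpm_chip_unregister() is not blocked for long.
	 */

	tpm_put_ops(chip);	/* drops the ops lock and the device reference */
	return 0;
}

Registration also changes shape: the chip number is reserved with a NULL IDR slot in tpm_chip_alloc(), published with idr_replace() in tpm_add_char_device(), hidden again in tpm_del_char_device(), and finally freed in tpm_dev_release().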
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index de0337ebd658..f5d452151c6b 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c | |||
@@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file) | |||
61 | * by the check of is_open variable, which is protected | 61 | * by the check of is_open variable, which is protected |
62 | * by driver_lock. */ | 62 | * by driver_lock. */ |
63 | if (test_and_set_bit(0, &chip->is_open)) { | 63 | if (test_and_set_bit(0, &chip->is_open)) { |
64 | dev_dbg(chip->pdev, "Another process owns this TPM\n"); | 64 | dev_dbg(&chip->dev, "Another process owns this TPM\n"); |
65 | return -EBUSY; | 65 | return -EBUSY; |
66 | } | 66 | } |
67 | 67 | ||
@@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file) | |||
79 | INIT_WORK(&priv->work, timeout_work); | 79 | INIT_WORK(&priv->work, timeout_work); |
80 | 80 | ||
81 | file->private_data = priv; | 81 | file->private_data = priv; |
82 | get_device(chip->pdev); | ||
83 | return 0; | 82 | return 0; |
84 | } | 83 | } |
85 | 84 | ||
@@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, | |||
137 | return -EFAULT; | 136 | return -EFAULT; |
138 | } | 137 | } |
139 | 138 | ||
140 | /* atomic tpm command send and result receive */ | 139 | /* atomic tpm command send and result receive. We only hold the ops |
140 | * lock during this period so that the tpm can be unregistered even if | ||
141 | * the char dev is held open. | ||
142 | */ | ||
143 | if (tpm_try_get_ops(priv->chip)) { | ||
144 | mutex_unlock(&priv->buffer_mutex); | ||
145 | return -EPIPE; | ||
146 | } | ||
141 | out_size = tpm_transmit(priv->chip, priv->data_buffer, | 147 | out_size = tpm_transmit(priv->chip, priv->data_buffer, |
142 | sizeof(priv->data_buffer)); | 148 | sizeof(priv->data_buffer)); |
149 | |||
150 | tpm_put_ops(priv->chip); | ||
143 | if (out_size < 0) { | 151 | if (out_size < 0) { |
144 | mutex_unlock(&priv->buffer_mutex); | 152 | mutex_unlock(&priv->buffer_mutex); |
145 | return out_size; | 153 | return out_size; |
@@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file) | |||
166 | file->private_data = NULL; | 174 | file->private_data = NULL; |
167 | atomic_set(&priv->data_pending, 0); | 175 | atomic_set(&priv->data_pending, 0); |
168 | clear_bit(0, &priv->chip->is_open); | 176 | clear_bit(0, &priv->chip->is_open); |
169 | put_device(priv->chip->pdev); | ||
170 | kfree(priv); | 177 | kfree(priv); |
171 | return 0; | 178 | return 0; |
172 | } | 179 | } |
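The /dev/tpmX write path above is the other half of the scheme: a caller that already holds a chip pointer brackets each command with the ops lock, so the chip can be unregistered even while the character device stays open. Condensed from tpm_write(), with a hypothetical wrapper name:

static ssize_t example_send_command(struct tpm_chip *chip, char *buf,
				    size_t bufsiz)
{
	ssize_t rc;

	if (tpm_try_get_ops(chip))
		return -EPIPE;	/* ops already cleared: chip was unregistered */

	rc = tpm_transmit(chip, buf, bufsiz);	/* chip->ops is safe in here */

	tpm_put_ops(chip);
	return rc;
}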
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index e2fa89c88304..1abe2d7a2610 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
@@ -319,7 +319,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, | |||
319 | duration_idx = tpm_ordinal_duration[ordinal]; | 319 | duration_idx = tpm_ordinal_duration[ordinal]; |
320 | 320 | ||
321 | if (duration_idx != TPM_UNDEFINED) | 321 | if (duration_idx != TPM_UNDEFINED) |
322 | duration = chip->vendor.duration[duration_idx]; | 322 | duration = chip->duration[duration_idx]; |
323 | if (duration <= 0) | 323 | if (duration <= 0) |
324 | return 2 * 60 * HZ; | 324 | return 2 * 60 * HZ; |
325 | else | 325 | else |
@@ -345,7 +345,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | |||
345 | if (count == 0) | 345 | if (count == 0) |
346 | return -ENODATA; | 346 | return -ENODATA; |
347 | if (count > bufsiz) { | 347 | if (count > bufsiz) { |
348 | dev_err(chip->pdev, | 348 | dev_err(&chip->dev, |
349 | "invalid count value %x %zx\n", count, bufsiz); | 349 | "invalid count value %x %zx\n", count, bufsiz); |
350 | return -E2BIG; | 350 | return -E2BIG; |
351 | } | 351 | } |
@@ -354,12 +354,12 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | |||
354 | 354 | ||
355 | rc = chip->ops->send(chip, (u8 *) buf, count); | 355 | rc = chip->ops->send(chip, (u8 *) buf, count); |
356 | if (rc < 0) { | 356 | if (rc < 0) { |
357 | dev_err(chip->pdev, | 357 | dev_err(&chip->dev, |
358 | "tpm_transmit: tpm_send: error %zd\n", rc); | 358 | "tpm_transmit: tpm_send: error %zd\n", rc); |
359 | goto out; | 359 | goto out; |
360 | } | 360 | } |
361 | 361 | ||
362 | if (chip->vendor.irq) | 362 | if (chip->flags & TPM_CHIP_FLAG_IRQ) |
363 | goto out_recv; | 363 | goto out_recv; |
364 | 364 | ||
365 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | 365 | if (chip->flags & TPM_CHIP_FLAG_TPM2) |
@@ -373,7 +373,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | |||
373 | goto out_recv; | 373 | goto out_recv; |
374 | 374 | ||
375 | if (chip->ops->req_canceled(chip, status)) { | 375 | if (chip->ops->req_canceled(chip, status)) { |
376 | dev_err(chip->pdev, "Operation Canceled\n"); | 376 | dev_err(&chip->dev, "Operation Canceled\n"); |
377 | rc = -ECANCELED; | 377 | rc = -ECANCELED; |
378 | goto out; | 378 | goto out; |
379 | } | 379 | } |
@@ -383,14 +383,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | |||
383 | } while (time_before(jiffies, stop)); | 383 | } while (time_before(jiffies, stop)); |
384 | 384 | ||
385 | chip->ops->cancel(chip); | 385 | chip->ops->cancel(chip); |
386 | dev_err(chip->pdev, "Operation Timed out\n"); | 386 | dev_err(&chip->dev, "Operation Timed out\n"); |
387 | rc = -ETIME; | 387 | rc = -ETIME; |
388 | goto out; | 388 | goto out; |
389 | 389 | ||
390 | out_recv: | 390 | out_recv: |
391 | rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); | 391 | rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); |
392 | if (rc < 0) | 392 | if (rc < 0) |
393 | dev_err(chip->pdev, | 393 | dev_err(&chip->dev, |
394 | "tpm_transmit: tpm_recv: error %zd\n", rc); | 394 | "tpm_transmit: tpm_recv: error %zd\n", rc); |
395 | out: | 395 | out: |
396 | mutex_unlock(&chip->tpm_mutex); | 396 | mutex_unlock(&chip->tpm_mutex); |
@@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, | |||
416 | 416 | ||
417 | err = be32_to_cpu(header->return_code); | 417 | err = be32_to_cpu(header->return_code); |
418 | if (err != 0 && desc) | 418 | if (err != 0 && desc) |
419 | dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err, | 419 | dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, |
420 | desc); | 420 | desc); |
421 | 421 | ||
422 | return err; | 422 | return err; |
@@ -432,12 +432,11 @@ static const struct tpm_input_header tpm_getcap_header = { | |||
432 | .ordinal = TPM_ORD_GET_CAP | 432 | .ordinal = TPM_ORD_GET_CAP |
433 | }; | 433 | }; |
434 | 434 | ||
435 | ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap, | 435 | ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap, |
436 | const char *desc) | 436 | const char *desc) |
437 | { | 437 | { |
438 | struct tpm_cmd_t tpm_cmd; | 438 | struct tpm_cmd_t tpm_cmd; |
439 | int rc; | 439 | int rc; |
440 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
441 | 440 | ||
442 | tpm_cmd.header.in = tpm_getcap_header; | 441 | tpm_cmd.header.in = tpm_getcap_header; |
443 | if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) { | 442 | if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) { |
@@ -505,15 +504,15 @@ int tpm_get_timeouts(struct tpm_chip *chip) | |||
505 | 504 | ||
506 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { | 505 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { |
507 | /* Fixed timeouts for TPM2 */ | 506 | /* Fixed timeouts for TPM2 */ |
508 | chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); | 507 | chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); |
509 | chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); | 508 | chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); |
510 | chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); | 509 | chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); |
511 | chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); | 510 | chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); |
512 | chip->vendor.duration[TPM_SHORT] = | 511 | chip->duration[TPM_SHORT] = |
513 | msecs_to_jiffies(TPM2_DURATION_SHORT); | 512 | msecs_to_jiffies(TPM2_DURATION_SHORT); |
514 | chip->vendor.duration[TPM_MEDIUM] = | 513 | chip->duration[TPM_MEDIUM] = |
515 | msecs_to_jiffies(TPM2_DURATION_MEDIUM); | 514 | msecs_to_jiffies(TPM2_DURATION_MEDIUM); |
516 | chip->vendor.duration[TPM_LONG] = | 515 | chip->duration[TPM_LONG] = |
517 | msecs_to_jiffies(TPM2_DURATION_LONG); | 516 | msecs_to_jiffies(TPM2_DURATION_LONG); |
518 | return 0; | 517 | return 0; |
519 | } | 518 | } |
@@ -527,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) | |||
527 | if (rc == TPM_ERR_INVALID_POSTINIT) { | 526 | if (rc == TPM_ERR_INVALID_POSTINIT) { |
528 | /* The TPM is not started, we are the first to talk to it. | 527 | /* The TPM is not started, we are the first to talk to it. |
529 | Execute a startup command. */ | 528 | Execute a startup command. */ |
530 | dev_info(chip->pdev, "Issuing TPM_STARTUP"); | 529 | dev_info(&chip->dev, "Issuing TPM_STARTUP"); |
531 | if (tpm_startup(chip, TPM_ST_CLEAR)) | 530 | if (tpm_startup(chip, TPM_ST_CLEAR)) |
532 | return rc; | 531 | return rc; |
533 | 532 | ||
@@ -539,7 +538,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) | |||
539 | NULL); | 538 | NULL); |
540 | } | 539 | } |
541 | if (rc) { | 540 | if (rc) { |
542 | dev_err(chip->pdev, | 541 | dev_err(&chip->dev, |
543 | "A TPM error (%zd) occurred attempting to determine the timeouts\n", | 542 | "A TPM error (%zd) occurred attempting to determine the timeouts\n", |
544 | rc); | 543 | rc); |
545 | goto duration; | 544 | goto duration; |
@@ -561,10 +560,10 @@ int tpm_get_timeouts(struct tpm_chip *chip) | |||
561 | * of misreporting. | 560 | * of misreporting. |
562 | */ | 561 | */ |
563 | if (chip->ops->update_timeouts != NULL) | 562 | if (chip->ops->update_timeouts != NULL) |
564 | chip->vendor.timeout_adjusted = | 563 | chip->timeout_adjusted = |
565 | chip->ops->update_timeouts(chip, new_timeout); | 564 | chip->ops->update_timeouts(chip, new_timeout); |
566 | 565 | ||
567 | if (!chip->vendor.timeout_adjusted) { | 566 | if (!chip->timeout_adjusted) { |
568 | /* Don't overwrite default if value is 0 */ | 567 | /* Don't overwrite default if value is 0 */ |
569 | if (new_timeout[0] != 0 && new_timeout[0] < 1000) { | 568 | if (new_timeout[0] != 0 && new_timeout[0] < 1000) { |
570 | int i; | 569 | int i; |
@@ -572,13 +571,13 @@ int tpm_get_timeouts(struct tpm_chip *chip) | |||
572 | /* timeouts in msec rather usec */ | 571 | /* timeouts in msec rather usec */ |
573 | for (i = 0; i != ARRAY_SIZE(new_timeout); i++) | 572 | for (i = 0; i != ARRAY_SIZE(new_timeout); i++) |
574 | new_timeout[i] *= 1000; | 573 | new_timeout[i] *= 1000; |
575 | chip->vendor.timeout_adjusted = true; | 574 | chip->timeout_adjusted = true; |
576 | } | 575 | } |
577 | } | 576 | } |
578 | 577 | ||
579 | /* Report adjusted timeouts */ | 578 | /* Report adjusted timeouts */ |
580 | if (chip->vendor.timeout_adjusted) { | 579 | if (chip->timeout_adjusted) { |
581 | dev_info(chip->pdev, | 580 | dev_info(&chip->dev, |
582 | HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", | 581 | HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", |
583 | old_timeout[0], new_timeout[0], | 582 | old_timeout[0], new_timeout[0], |
584 | old_timeout[1], new_timeout[1], | 583 | old_timeout[1], new_timeout[1], |
@@ -586,10 +585,10 @@ int tpm_get_timeouts(struct tpm_chip *chip) | |||
586 | old_timeout[3], new_timeout[3]); | 585 | old_timeout[3], new_timeout[3]); |
587 | } | 586 | } |
588 | 587 | ||
589 | chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]); | 588 | chip->timeout_a = usecs_to_jiffies(new_timeout[0]); |
590 | chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]); | 589 | chip->timeout_b = usecs_to_jiffies(new_timeout[1]); |
591 | chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]); | 590 | chip->timeout_c = usecs_to_jiffies(new_timeout[2]); |
592 | chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]); | 591 | chip->timeout_d = usecs_to_jiffies(new_timeout[3]); |
593 | 592 | ||
594 | duration: | 593 | duration: |
595 | tpm_cmd.header.in = tpm_getcap_header; | 594 | tpm_cmd.header.in = tpm_getcap_header; |
@@ -608,11 +607,11 @@ duration: | |||
608 | return -EINVAL; | 607 | return -EINVAL; |
609 | 608 | ||
610 | duration_cap = &tpm_cmd.params.getcap_out.cap.duration; | 609 | duration_cap = &tpm_cmd.params.getcap_out.cap.duration; |
611 | chip->vendor.duration[TPM_SHORT] = | 610 | chip->duration[TPM_SHORT] = |
612 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); | 611 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); |
613 | chip->vendor.duration[TPM_MEDIUM] = | 612 | chip->duration[TPM_MEDIUM] = |
614 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); | 613 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); |
615 | chip->vendor.duration[TPM_LONG] = | 614 | chip->duration[TPM_LONG] = |
616 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); | 615 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); |
617 | 616 | ||
618 | /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above | 617 | /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above |
@@ -620,12 +619,12 @@ duration: | |||
620 | * fix up the resulting too-small TPM_SHORT value to make things work. | 619 | * fix up the resulting too-small TPM_SHORT value to make things work. |
621 | * We also scale the TPM_MEDIUM and -_LONG values by 1000. | 620 | * We also scale the TPM_MEDIUM and -_LONG values by 1000. |
622 | */ | 621 | */ |
623 | if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) { | 622 | if (chip->duration[TPM_SHORT] < (HZ / 100)) { |
624 | chip->vendor.duration[TPM_SHORT] = HZ; | 623 | chip->duration[TPM_SHORT] = HZ; |
625 | chip->vendor.duration[TPM_MEDIUM] *= 1000; | 624 | chip->duration[TPM_MEDIUM] *= 1000; |
626 | chip->vendor.duration[TPM_LONG] *= 1000; | 625 | chip->duration[TPM_LONG] *= 1000; |
627 | chip->vendor.duration_adjusted = true; | 626 | chip->duration_adjusted = true; |
628 | dev_info(chip->pdev, "Adjusting TPM timeout parameters."); | 627 | dev_info(&chip->dev, "Adjusting TPM timeout parameters."); |
629 | } | 628 | } |
630 | return 0; | 629 | return 0; |
631 | } | 630 | } |
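
Both fix-ups above are unit corrections: a non-zero timeout reported below 1000 is assumed to be in milliseconds and rescaled to microseconds before the usecs_to_jiffies() conversion, and a TPM_SHORT duration under HZ/100 (10ms) is treated as the known Broadcom-style misreport and clamped. A short sketch with made-up reported values (illustrative only, not taken from a real chip):

    /* A chip that reports timeout A as 750 really means 750ms, so the
     * loop in the hunk above rescales it to 750000us.
     */
    unsigned long new_timeout[4] = { 750, 2000, 750, 750 };
    int i;

    if (new_timeout[0] != 0 && new_timeout[0] < 1000)
            for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
                    new_timeout[i] *= 1000;  /* 750 -> 750000 usec */
    /* then chip->timeout_a = usecs_to_jiffies(new_timeout[0]), etc. */
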
@@ -700,7 +699,7 @@ int tpm_is_tpm2(u32 chip_num) | |||
700 | 699 | ||
701 | rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; | 700 | rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; |
702 | 701 | ||
703 | tpm_chip_put(chip); | 702 | tpm_put_ops(chip); |
704 | 703 | ||
705 | return rc; | 704 | return rc; |
706 | } | 705 | } |
@@ -729,7 +728,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) | |||
729 | rc = tpm2_pcr_read(chip, pcr_idx, res_buf); | 728 | rc = tpm2_pcr_read(chip, pcr_idx, res_buf); |
730 | else | 729 | else |
731 | rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); | 730 | rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); |
732 | tpm_chip_put(chip); | 731 | tpm_put_ops(chip); |
733 | return rc; | 732 | return rc; |
734 | } | 733 | } |
735 | EXPORT_SYMBOL_GPL(tpm_pcr_read); | 734 | EXPORT_SYMBOL_GPL(tpm_pcr_read); |
@@ -764,7 +763,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) | |||
764 | 763 | ||
765 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { | 764 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { |
766 | rc = tpm2_pcr_extend(chip, pcr_idx, hash); | 765 | rc = tpm2_pcr_extend(chip, pcr_idx, hash); |
767 | tpm_chip_put(chip); | 766 | tpm_put_ops(chip); |
768 | return rc; | 767 | return rc; |
769 | } | 768 | } |
770 | 769 | ||
@@ -774,7 +773,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) | |||
774 | rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, | 773 | rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, |
775 | "attempting extend a PCR value"); | 774 | "attempting extend a PCR value"); |
776 | 775 | ||
777 | tpm_chip_put(chip); | 776 | tpm_put_ops(chip); |
778 | return rc; | 777 | return rc; |
779 | } | 778 | } |
780 | EXPORT_SYMBOL_GPL(tpm_pcr_extend); | 779 | EXPORT_SYMBOL_GPL(tpm_pcr_extend); |
@@ -815,7 +814,9 @@ int tpm_do_selftest(struct tpm_chip *chip) | |||
815 | * around 300ms while the self test is ongoing, keep trying | 814 | * around 300ms while the self test is ongoing, keep trying |
816 | * until the self test duration expires. */ | 815 | * until the self test duration expires. */ |
817 | if (rc == -ETIME) { | 816 | if (rc == -ETIME) { |
818 | dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test"); | 817 | dev_info( |
818 | &chip->dev, HW_ERR | ||
819 | "TPM command timed out during continue self test"); | ||
819 | msleep(delay_msec); | 820 | msleep(delay_msec); |
820 | continue; | 821 | continue; |
821 | } | 822 | } |
@@ -825,7 +826,7 @@ int tpm_do_selftest(struct tpm_chip *chip) | |||
825 | 826 | ||
826 | rc = be32_to_cpu(cmd.header.out.return_code); | 827 | rc = be32_to_cpu(cmd.header.out.return_code); |
827 | if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { | 828 | if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { |
828 | dev_info(chip->pdev, | 829 | dev_info(&chip->dev, |
829 | "TPM is disabled/deactivated (0x%X)\n", rc); | 830 | "TPM is disabled/deactivated (0x%X)\n", rc); |
830 | /* TPM is disabled and/or deactivated; driver can | 831 | /* TPM is disabled and/or deactivated; driver can |
831 | * proceed and TPM does handle commands for | 832 | * proceed and TPM does handle commands for |
@@ -842,6 +843,33 @@ int tpm_do_selftest(struct tpm_chip *chip) | |||
842 | } | 843 | } |
843 | EXPORT_SYMBOL_GPL(tpm_do_selftest); | 844 | EXPORT_SYMBOL_GPL(tpm_do_selftest); |
844 | 845 | ||
846 | /** | ||
847 | * tpm1_auto_startup - Perform the standard automatic TPM initialization | ||
848 | * sequence | ||
849 | * @chip: TPM chip to use | ||
850 | * | ||
851 | * Returns 0 on success, < 0 in case of fatal error. | ||
852 | */ | ||
853 | int tpm1_auto_startup(struct tpm_chip *chip) | ||
854 | { | ||
855 | int rc; | ||
856 | |||
857 | rc = tpm_get_timeouts(chip); | ||
858 | if (rc) | ||
859 | goto out; | ||
860 | rc = tpm_do_selftest(chip); | ||
861 | if (rc) { | ||
862 | dev_err(&chip->dev, "TPM self test failed\n"); | ||
863 | goto out; | ||
864 | } | ||
865 | |||
866 | return rc; | ||
867 | out: | ||
868 | if (rc > 0) | ||
869 | rc = -ENODEV; | ||
870 | return rc; | ||
871 | } | ||
872 | |||
845 | int tpm_send(u32 chip_num, void *cmd, size_t buflen) | 873 | int tpm_send(u32 chip_num, void *cmd, size_t buflen) |
846 | { | 874 | { |
847 | struct tpm_chip *chip; | 875 | struct tpm_chip *chip; |
@@ -853,7 +881,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen) | |||
853 | 881 | ||
854 | rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd"); | 882 | rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd"); |
855 | 883 | ||
856 | tpm_chip_put(chip); | 884 | tpm_put_ops(chip); |
857 | return rc; | 885 | return rc; |
858 | } | 886 | } |
859 | EXPORT_SYMBOL_GPL(tpm_send); | 887 | EXPORT_SYMBOL_GPL(tpm_send); |
@@ -888,7 +916,7 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, | |||
888 | 916 | ||
889 | stop = jiffies + timeout; | 917 | stop = jiffies + timeout; |
890 | 918 | ||
891 | if (chip->vendor.irq) { | 919 | if (chip->flags & TPM_CHIP_FLAG_IRQ) { |
892 | again: | 920 | again: |
893 | timeout = stop - jiffies; | 921 | timeout = stop - jiffies; |
894 | if ((long)timeout <= 0) | 922 | if ((long)timeout <= 0) |
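
wait_for_tpm_stat() now keys interrupt-driven waiting off a chip flag instead of the removed chip->vendor.irq field. A driver that brings up an interrupt is expected to advertise it along these lines (sketch only; the handler name and request call are illustrative, not taken from these hunks):

    if (!devm_request_irq(&chip->dev, irq, my_tpm_irq_handler, IRQF_SHARED,
                          dev_name(&chip->dev), chip))
            chip->flags |= TPM_CHIP_FLAG_IRQ;  /* replaces chip->vendor.irq != 0 */
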
@@ -978,10 +1006,10 @@ int tpm_pm_suspend(struct device *dev) | |||
978 | } | 1006 | } |
979 | 1007 | ||
980 | if (rc) | 1008 | if (rc) |
981 | dev_err(chip->pdev, | 1009 | dev_err(&chip->dev, |
982 | "Error (%d) sending savestate before suspend\n", rc); | 1010 | "Error (%d) sending savestate before suspend\n", rc); |
983 | else if (try > 0) | 1011 | else if (try > 0) |
984 | dev_warn(chip->pdev, "TPM savestate took %dms\n", | 1012 | dev_warn(&chip->dev, "TPM savestate took %dms\n", |
985 | try * TPM_TIMEOUT_RETRY); | 1013 | try * TPM_TIMEOUT_RETRY); |
986 | 1014 | ||
987 | return rc; | 1015 | return rc; |
@@ -1035,7 +1063,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) | |||
1035 | 1063 | ||
1036 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { | 1064 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { |
1037 | err = tpm2_get_random(chip, out, max); | 1065 | err = tpm2_get_random(chip, out, max); |
1038 | tpm_chip_put(chip); | 1066 | tpm_put_ops(chip); |
1039 | return err; | 1067 | return err; |
1040 | } | 1068 | } |
1041 | 1069 | ||
@@ -1057,7 +1085,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) | |||
1057 | num_bytes -= recd; | 1085 | num_bytes -= recd; |
1058 | } while (retries-- && total < max); | 1086 | } while (retries-- && total < max); |
1059 | 1087 | ||
1060 | tpm_chip_put(chip); | 1088 | tpm_put_ops(chip); |
1061 | return total ? total : -EIO; | 1089 | return total ? total : -EIO; |
1062 | } | 1090 | } |
1063 | EXPORT_SYMBOL_GPL(tpm_get_random); | 1091 | EXPORT_SYMBOL_GPL(tpm_get_random); |
@@ -1083,7 +1111,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload, | |||
1083 | 1111 | ||
1084 | rc = tpm2_seal_trusted(chip, payload, options); | 1112 | rc = tpm2_seal_trusted(chip, payload, options); |
1085 | 1113 | ||
1086 | tpm_chip_put(chip); | 1114 | tpm_put_ops(chip); |
1087 | return rc; | 1115 | return rc; |
1088 | } | 1116 | } |
1089 | EXPORT_SYMBOL_GPL(tpm_seal_trusted); | 1117 | EXPORT_SYMBOL_GPL(tpm_seal_trusted); |
@@ -1109,7 +1137,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload, | |||
1109 | 1137 | ||
1110 | rc = tpm2_unseal_trusted(chip, payload, options); | 1138 | rc = tpm2_unseal_trusted(chip, payload, options); |
1111 | 1139 | ||
1112 | tpm_chip_put(chip); | 1140 | tpm_put_ops(chip); |
1141 | |||
1113 | return rc; | 1142 | return rc; |
1114 | } | 1143 | } |
1115 | EXPORT_SYMBOL_GPL(tpm_unseal_trusted); | 1144 | EXPORT_SYMBOL_GPL(tpm_unseal_trusted); |
@@ -1136,6 +1165,7 @@ static int __init tpm_init(void) | |||
1136 | 1165 | ||
1137 | static void __exit tpm_exit(void) | 1166 | static void __exit tpm_exit(void) |
1138 | { | 1167 | { |
1168 | idr_destroy(&dev_nums_idr); | ||
1139 | class_destroy(tpm_class); | 1169 | class_destroy(tpm_class); |
1140 | unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES); | 1170 | unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES); |
1141 | } | 1171 | } |
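
The exported wrappers in this file now pair tpm_chip_find_get() with tpm_put_ops() rather than the old tpm_chip_put(), keeping chip->ops pinned for the duration of the call even if the driver is being unregistered. A minimal sketch of the pattern, assuming tpm_chip_find_get() returns with the ops reference already held and with do_work() standing in for the real command:

    static int tpm_do_something(u32 chip_num)
    {
            struct tpm_chip *chip;
            int rc;

            chip = tpm_chip_find_get(chip_num);
            if (chip == NULL)
                    return -ENODEV;

            rc = do_work(chip);     /* chip->ops cannot go away here */

            tpm_put_ops(chip);      /* was tpm_chip_put(chip) */
            return rc;
    }
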
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index ee66fd4673f3..b46cf70c8b16 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c | |||
@@ -36,7 +36,7 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, | |||
36 | int i, rc; | 36 | int i, rc; |
37 | char *str = buf; | 37 | char *str = buf; |
38 | 38 | ||
39 | struct tpm_chip *chip = dev_get_drvdata(dev); | 39 | struct tpm_chip *chip = to_tpm_chip(dev); |
40 | 40 | ||
41 | tpm_cmd.header.in = tpm_readpubek_header; | 41 | tpm_cmd.header.in = tpm_readpubek_header; |
42 | err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, | 42 | err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, |
@@ -92,9 +92,9 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, | |||
92 | ssize_t rc; | 92 | ssize_t rc; |
93 | int i, j, num_pcrs; | 93 | int i, j, num_pcrs; |
94 | char *str = buf; | 94 | char *str = buf; |
95 | struct tpm_chip *chip = dev_get_drvdata(dev); | 95 | struct tpm_chip *chip = to_tpm_chip(dev); |
96 | 96 | ||
97 | rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap, | 97 | rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap, |
98 | "attempting to determine the number of PCRS"); | 98 | "attempting to determine the number of PCRS"); |
99 | if (rc) | 99 | if (rc) |
100 | return 0; | 100 | return 0; |
@@ -119,8 +119,8 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, | |||
119 | cap_t cap; | 119 | cap_t cap; |
120 | ssize_t rc; | 120 | ssize_t rc; |
121 | 121 | ||
122 | rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, | 122 | rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, |
123 | "attempting to determine the permanent enabled state"); | 123 | "attempting to determine the permanent enabled state"); |
124 | if (rc) | 124 | if (rc) |
125 | return 0; | 125 | return 0; |
126 | 126 | ||
@@ -135,8 +135,8 @@ static ssize_t active_show(struct device *dev, struct device_attribute *attr, | |||
135 | cap_t cap; | 135 | cap_t cap; |
136 | ssize_t rc; | 136 | ssize_t rc; |
137 | 137 | ||
138 | rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, | 138 | rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, |
139 | "attempting to determine the permanent active state"); | 139 | "attempting to determine the permanent active state"); |
140 | if (rc) | 140 | if (rc) |
141 | return 0; | 141 | return 0; |
142 | 142 | ||
@@ -151,8 +151,8 @@ static ssize_t owned_show(struct device *dev, struct device_attribute *attr, | |||
151 | cap_t cap; | 151 | cap_t cap; |
152 | ssize_t rc; | 152 | ssize_t rc; |
153 | 153 | ||
154 | rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap, | 154 | rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, |
155 | "attempting to determine the owner state"); | 155 | "attempting to determine the owner state"); |
156 | if (rc) | 156 | if (rc) |
157 | return 0; | 157 | return 0; |
158 | 158 | ||
@@ -167,8 +167,8 @@ static ssize_t temp_deactivated_show(struct device *dev, | |||
167 | cap_t cap; | 167 | cap_t cap; |
168 | ssize_t rc; | 168 | ssize_t rc; |
169 | 169 | ||
170 | rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap, | 170 | rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, |
171 | "attempting to determine the temporary state"); | 171 | "attempting to determine the temporary state"); |
172 | if (rc) | 172 | if (rc) |
173 | return 0; | 173 | return 0; |
174 | 174 | ||
@@ -180,11 +180,12 @@ static DEVICE_ATTR_RO(temp_deactivated); | |||
180 | static ssize_t caps_show(struct device *dev, struct device_attribute *attr, | 180 | static ssize_t caps_show(struct device *dev, struct device_attribute *attr, |
181 | char *buf) | 181 | char *buf) |
182 | { | 182 | { |
183 | struct tpm_chip *chip = to_tpm_chip(dev); | ||
183 | cap_t cap; | 184 | cap_t cap; |
184 | ssize_t rc; | 185 | ssize_t rc; |
185 | char *str = buf; | 186 | char *str = buf; |
186 | 187 | ||
187 | rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, | 188 | rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, |
188 | "attempting to determine the manufacturer"); | 189 | "attempting to determine the manufacturer"); |
189 | if (rc) | 190 | if (rc) |
190 | return 0; | 191 | return 0; |
@@ -192,8 +193,8 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, | |||
192 | be32_to_cpu(cap.manufacturer_id)); | 193 | be32_to_cpu(cap.manufacturer_id)); |
193 | 194 | ||
194 | /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ | 195 | /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ |
195 | rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, | 196 | rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap, |
196 | "attempting to determine the 1.2 version"); | 197 | "attempting to determine the 1.2 version"); |
197 | if (!rc) { | 198 | if (!rc) { |
198 | str += sprintf(str, | 199 | str += sprintf(str, |
199 | "TCG version: %d.%d\nFirmware version: %d.%d\n", | 200 | "TCG version: %d.%d\nFirmware version: %d.%d\n", |
@@ -203,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, | |||
203 | cap.tpm_version_1_2.revMinor); | 204 | cap.tpm_version_1_2.revMinor); |
204 | } else { | 205 | } else { |
205 | /* Otherwise just use TPM_STRUCT_VER */ | 206 | /* Otherwise just use TPM_STRUCT_VER */ |
206 | rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, | 207 | rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap, |
207 | "attempting to determine the 1.1 version"); | 208 | "attempting to determine the 1.1 version"); |
208 | if (rc) | 209 | if (rc) |
209 | return 0; | 210 | return 0; |
@@ -222,7 +223,7 @@ static DEVICE_ATTR_RO(caps); | |||
222 | static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, | 223 | static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, |
223 | const char *buf, size_t count) | 224 | const char *buf, size_t count) |
224 | { | 225 | { |
225 | struct tpm_chip *chip = dev_get_drvdata(dev); | 226 | struct tpm_chip *chip = to_tpm_chip(dev); |
226 | if (chip == NULL) | 227 | if (chip == NULL) |
227 | return 0; | 228 | return 0; |
228 | 229 | ||
@@ -234,16 +235,16 @@ static DEVICE_ATTR_WO(cancel); | |||
234 | static ssize_t durations_show(struct device *dev, struct device_attribute *attr, | 235 | static ssize_t durations_show(struct device *dev, struct device_attribute *attr, |
235 | char *buf) | 236 | char *buf) |
236 | { | 237 | { |
237 | struct tpm_chip *chip = dev_get_drvdata(dev); | 238 | struct tpm_chip *chip = to_tpm_chip(dev); |
238 | 239 | ||
239 | if (chip->vendor.duration[TPM_LONG] == 0) | 240 | if (chip->duration[TPM_LONG] == 0) |
240 | return 0; | 241 | return 0; |
241 | 242 | ||
242 | return sprintf(buf, "%d %d %d [%s]\n", | 243 | return sprintf(buf, "%d %d %d [%s]\n", |
243 | jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), | 244 | jiffies_to_usecs(chip->duration[TPM_SHORT]), |
244 | jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), | 245 | jiffies_to_usecs(chip->duration[TPM_MEDIUM]), |
245 | jiffies_to_usecs(chip->vendor.duration[TPM_LONG]), | 246 | jiffies_to_usecs(chip->duration[TPM_LONG]), |
246 | chip->vendor.duration_adjusted | 247 | chip->duration_adjusted |
247 | ? "adjusted" : "original"); | 248 | ? "adjusted" : "original"); |
248 | } | 249 | } |
249 | static DEVICE_ATTR_RO(durations); | 250 | static DEVICE_ATTR_RO(durations); |
@@ -251,14 +252,14 @@ static DEVICE_ATTR_RO(durations); | |||
251 | static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, | 252 | static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, |
252 | char *buf) | 253 | char *buf) |
253 | { | 254 | { |
254 | struct tpm_chip *chip = dev_get_drvdata(dev); | 255 | struct tpm_chip *chip = to_tpm_chip(dev); |
255 | 256 | ||
256 | return sprintf(buf, "%d %d %d %d [%s]\n", | 257 | return sprintf(buf, "%d %d %d %d [%s]\n", |
257 | jiffies_to_usecs(chip->vendor.timeout_a), | 258 | jiffies_to_usecs(chip->timeout_a), |
258 | jiffies_to_usecs(chip->vendor.timeout_b), | 259 | jiffies_to_usecs(chip->timeout_b), |
259 | jiffies_to_usecs(chip->vendor.timeout_c), | 260 | jiffies_to_usecs(chip->timeout_c), |
260 | jiffies_to_usecs(chip->vendor.timeout_d), | 261 | jiffies_to_usecs(chip->timeout_d), |
261 | chip->vendor.timeout_adjusted | 262 | chip->timeout_adjusted |
262 | ? "adjusted" : "original"); | 263 | ? "adjusted" : "original"); |
263 | } | 264 | } |
264 | static DEVICE_ATTR_RO(timeouts); | 265 | static DEVICE_ATTR_RO(timeouts); |
@@ -281,19 +282,12 @@ static const struct attribute_group tpm_dev_group = { | |||
281 | .attrs = tpm_dev_attrs, | 282 | .attrs = tpm_dev_attrs, |
282 | }; | 283 | }; |
283 | 284 | ||
284 | int tpm_sysfs_add_device(struct tpm_chip *chip) | 285 | void tpm_sysfs_add_device(struct tpm_chip *chip) |
285 | { | 286 | { |
286 | int err; | 287 | /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del |
287 | err = sysfs_create_group(&chip->pdev->kobj, | 288 | * is called before ops is null'd and the sysfs core synchronizes this |
288 | &tpm_dev_group); | 289 | * removal so that no callbacks are running or can run again |
289 | 290 | */ | |
290 | if (err) | 291 | WARN_ON(chip->groups_cnt != 0); |
291 | dev_err(chip->pdev, | 292 | chip->groups[chip->groups_cnt++] = &tpm_dev_group; |
292 | "failed to create sysfs attributes, %d\n", err); | ||
293 | return err; | ||
294 | } | ||
295 | |||
296 | void tpm_sysfs_del_device(struct tpm_chip *chip) | ||
297 | { | ||
298 | sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group); | ||
299 | } | 293 | } |
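
tpm_sysfs_add_device() no longer creates the attributes itself; it only queues &tpm_dev_group on chip->groups. The registration side is outside these hunks, but the mechanism is presumably the standard device-core one, roughly:

    /* Sketch, not the exact tpm-chip.c code: with dev.groups pointing at
     * chip->groups, device_add()/device_del() create and remove the
     * attributes together with the device, which is what the comment
     * above relies on to synchronize against in-flight show()/store().
     */
    chip->dev.groups = chip->groups;  /* filled by tpm_sysfs_add_device() */
    rc = device_add(&chip->dev);
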
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 28b477e8da6a..3e32d5bd2dc6 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -19,6 +19,10 @@ | |||
19 | * License. | 19 | * License. |
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | |||
23 | #ifndef __TPM_H__ | ||
24 | #define __TPM_H__ | ||
25 | |||
22 | #include <linux/module.h> | 26 | #include <linux/module.h> |
23 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
24 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
@@ -34,7 +38,7 @@ | |||
34 | enum tpm_const { | 38 | enum tpm_const { |
35 | TPM_MINOR = 224, /* officially assigned */ | 39 | TPM_MINOR = 224, /* officially assigned */ |
36 | TPM_BUFSIZE = 4096, | 40 | TPM_BUFSIZE = 4096, |
37 | TPM_NUM_DEVICES = 256, | 41 | TPM_NUM_DEVICES = 65536, |
38 | TPM_RETRY = 50, /* 5 seconds */ | 42 | TPM_RETRY = 50, /* 5 seconds */ |
39 | }; | 43 | }; |
40 | 44 | ||
@@ -128,33 +132,6 @@ enum tpm2_startup_types { | |||
128 | TPM2_SU_STATE = 0x0001, | 132 | TPM2_SU_STATE = 0x0001, |
129 | }; | 133 | }; |
130 | 134 | ||
131 | struct tpm_chip; | ||
132 | |||
133 | struct tpm_vendor_specific { | ||
134 | void __iomem *iobase; /* ioremapped address */ | ||
135 | unsigned long base; /* TPM base address */ | ||
136 | |||
137 | int irq; | ||
138 | |||
139 | int region_size; | ||
140 | int have_region; | ||
141 | |||
142 | struct list_head list; | ||
143 | int locality; | ||
144 | unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */ | ||
145 | bool timeout_adjusted; | ||
146 | unsigned long duration[3]; /* jiffies */ | ||
147 | bool duration_adjusted; | ||
148 | void *priv; | ||
149 | |||
150 | wait_queue_head_t read_queue; | ||
151 | wait_queue_head_t int_queue; | ||
152 | |||
153 | u16 manufacturer_id; | ||
154 | }; | ||
155 | |||
156 | #define TPM_VPRIV(c) ((c)->vendor.priv) | ||
157 | |||
158 | #define TPM_VID_INTEL 0x8086 | 135 | #define TPM_VID_INTEL 0x8086 |
159 | #define TPM_VID_WINBOND 0x1050 | 136 | #define TPM_VID_WINBOND 0x1050 |
160 | #define TPM_VID_STM 0x104A | 137 | #define TPM_VID_STM 0x104A |
@@ -164,44 +141,48 @@ struct tpm_vendor_specific { | |||
164 | enum tpm_chip_flags { | 141 | enum tpm_chip_flags { |
165 | TPM_CHIP_FLAG_REGISTERED = BIT(0), | 142 | TPM_CHIP_FLAG_REGISTERED = BIT(0), |
166 | TPM_CHIP_FLAG_TPM2 = BIT(1), | 143 | TPM_CHIP_FLAG_TPM2 = BIT(1), |
144 | TPM_CHIP_FLAG_IRQ = BIT(2), | ||
145 | TPM_CHIP_FLAG_VIRTUAL = BIT(3), | ||
167 | }; | 146 | }; |
168 | 147 | ||
169 | struct tpm_chip { | 148 | struct tpm_chip { |
170 | struct device *pdev; /* Device stuff */ | ||
171 | struct device dev; | 149 | struct device dev; |
172 | struct cdev cdev; | 150 | struct cdev cdev; |
173 | 151 | ||
152 | /* A driver callback under ops cannot be run unless ops_sem is held | ||
153 | * (sometimes implicitly, eg for the sysfs code). ops becomes null | ||
154 | * when the driver is unregistered, see tpm_try_get_ops. | ||
155 | */ | ||
156 | struct rw_semaphore ops_sem; | ||
174 | const struct tpm_class_ops *ops; | 157 | const struct tpm_class_ops *ops; |
158 | |||
175 | unsigned int flags; | 159 | unsigned int flags; |
176 | 160 | ||
177 | int dev_num; /* /dev/tpm# */ | 161 | int dev_num; /* /dev/tpm# */ |
178 | char devname[7]; | ||
179 | unsigned long is_open; /* only one allowed */ | 162 | unsigned long is_open; /* only one allowed */ |
180 | int time_expired; | ||
181 | 163 | ||
182 | struct mutex tpm_mutex; /* tpm is processing */ | 164 | struct mutex tpm_mutex; /* tpm is processing */ |
183 | 165 | ||
184 | struct tpm_vendor_specific vendor; | 166 | unsigned long timeout_a; /* jiffies */ |
167 | unsigned long timeout_b; /* jiffies */ | ||
168 | unsigned long timeout_c; /* jiffies */ | ||
169 | unsigned long timeout_d; /* jiffies */ | ||
170 | bool timeout_adjusted; | ||
171 | unsigned long duration[3]; /* jiffies */ | ||
172 | bool duration_adjusted; | ||
185 | 173 | ||
186 | struct dentry **bios_dir; | 174 | struct dentry **bios_dir; |
187 | 175 | ||
188 | #ifdef CONFIG_ACPI | 176 | const struct attribute_group *groups[3]; |
189 | const struct attribute_group *groups[2]; | ||
190 | unsigned int groups_cnt; | 177 | unsigned int groups_cnt; |
178 | #ifdef CONFIG_ACPI | ||
191 | acpi_handle acpi_dev_handle; | 179 | acpi_handle acpi_dev_handle; |
192 | char ppi_version[TPM_PPI_VERSION_LEN + 1]; | 180 | char ppi_version[TPM_PPI_VERSION_LEN + 1]; |
193 | #endif /* CONFIG_ACPI */ | 181 | #endif /* CONFIG_ACPI */ |
194 | |||
195 | struct list_head list; | ||
196 | }; | 182 | }; |
197 | 183 | ||
198 | #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) | 184 | #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) |
199 | 185 | ||
200 | static inline void tpm_chip_put(struct tpm_chip *chip) | ||
201 | { | ||
202 | module_put(chip->pdev->driver->owner); | ||
203 | } | ||
204 | |||
205 | static inline int tpm_read_index(int base, int index) | 186 | static inline int tpm_read_index(int base, int index) |
206 | { | 187 | { |
207 | outb(index, base); | 188 | outb(index, base); |
@@ -493,14 +474,17 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) | |||
493 | extern struct class *tpm_class; | 474 | extern struct class *tpm_class; |
494 | extern dev_t tpm_devt; | 475 | extern dev_t tpm_devt; |
495 | extern const struct file_operations tpm_fops; | 476 | extern const struct file_operations tpm_fops; |
477 | extern struct idr dev_nums_idr; | ||
496 | 478 | ||
497 | ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *); | 479 | ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap, |
480 | const char *desc); | ||
498 | ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | 481 | ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, |
499 | size_t bufsiz); | 482 | size_t bufsiz); |
500 | ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len, | 483 | ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len, |
501 | const char *desc); | 484 | const char *desc); |
502 | extern int tpm_get_timeouts(struct tpm_chip *); | 485 | extern int tpm_get_timeouts(struct tpm_chip *); |
503 | extern void tpm_gen_interrupt(struct tpm_chip *); | 486 | extern void tpm_gen_interrupt(struct tpm_chip *); |
487 | int tpm1_auto_startup(struct tpm_chip *chip); | ||
504 | extern int tpm_do_selftest(struct tpm_chip *); | 488 | extern int tpm_do_selftest(struct tpm_chip *); |
505 | extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32); | 489 | extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32); |
506 | extern int tpm_pm_suspend(struct device *); | 490 | extern int tpm_pm_suspend(struct device *); |
@@ -509,13 +493,17 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, | |||
509 | wait_queue_head_t *, bool); | 493 | wait_queue_head_t *, bool); |
510 | 494 | ||
511 | struct tpm_chip *tpm_chip_find_get(int chip_num); | 495 | struct tpm_chip *tpm_chip_find_get(int chip_num); |
512 | extern struct tpm_chip *tpmm_chip_alloc(struct device *dev, | 496 | __must_check int tpm_try_get_ops(struct tpm_chip *chip); |
497 | void tpm_put_ops(struct tpm_chip *chip); | ||
498 | |||
499 | extern struct tpm_chip *tpm_chip_alloc(struct device *dev, | ||
500 | const struct tpm_class_ops *ops); | ||
501 | extern struct tpm_chip *tpmm_chip_alloc(struct device *pdev, | ||
513 | const struct tpm_class_ops *ops); | 502 | const struct tpm_class_ops *ops); |
514 | extern int tpm_chip_register(struct tpm_chip *chip); | 503 | extern int tpm_chip_register(struct tpm_chip *chip); |
515 | extern void tpm_chip_unregister(struct tpm_chip *chip); | 504 | extern void tpm_chip_unregister(struct tpm_chip *chip); |
516 | 505 | ||
517 | int tpm_sysfs_add_device(struct tpm_chip *chip); | 506 | void tpm_sysfs_add_device(struct tpm_chip *chip); |
518 | void tpm_sysfs_del_device(struct tpm_chip *chip); | ||
519 | 507 | ||
520 | int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); | 508 | int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); |
521 | 509 | ||
@@ -539,9 +527,9 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, | |||
539 | ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, | 527 | ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, |
540 | u32 *value, const char *desc); | 528 | u32 *value, const char *desc); |
541 | 529 | ||
542 | extern int tpm2_startup(struct tpm_chip *chip, u16 startup_type); | 530 | int tpm2_auto_startup(struct tpm_chip *chip); |
543 | extern void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); | 531 | extern void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); |
544 | extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32); | 532 | extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32); |
545 | extern int tpm2_do_selftest(struct tpm_chip *chip); | ||
546 | extern int tpm2_gen_interrupt(struct tpm_chip *chip); | 533 | extern int tpm2_gen_interrupt(struct tpm_chip *chip); |
547 | extern int tpm2_probe(struct tpm_chip *chip); | 534 | extern int tpm2_probe(struct tpm_chip *chip); |
535 | #endif | ||
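
With struct tpm_vendor_specific gone, timeouts and durations live directly in struct tpm_chip, and anything driver-specific moves into a private structure hung off chip->dev. The per-driver conversion pattern, as the Atmel and CRB diffs below apply it, is roughly as follows (my_priv, my_tpm_ops and my_read_status_reg are placeholders, not names from this series):

    static int my_probe(struct device *dev)
    {
            struct my_priv *priv;
            struct tpm_chip *chip;

            priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            chip = tpmm_chip_alloc(dev, &my_tpm_ops);
            if (IS_ERR(chip))
                    return PTR_ERR(chip);

            dev_set_drvdata(&chip->dev, priv);  /* was chip->vendor.priv = priv */
            return tpm_chip_register(chip);
    }

    static u8 my_status(struct tpm_chip *chip)
    {
            struct my_priv *priv = dev_get_drvdata(&chip->dev);  /* was TPM_VPRIV(chip) */

            return my_read_status_reg(priv);    /* placeholder register read */
    }
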
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index b28e4da3d2cf..08c7e23ed535 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -597,7 +597,7 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) | |||
597 | 597 | ||
598 | rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); | 598 | rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); |
599 | if (rc) { | 599 | if (rc) { |
600 | dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n", | 600 | dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n", |
601 | handle); | 601 | handle); |
602 | return; | 602 | return; |
603 | } | 603 | } |
@@ -606,7 +606,7 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) | |||
606 | 606 | ||
607 | rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context"); | 607 | rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context"); |
608 | if (rc) | 608 | if (rc) |
609 | dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle, | 609 | dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle, |
610 | rc); | 610 | rc); |
611 | 611 | ||
612 | tpm_buf_destroy(&buf); | 612 | tpm_buf_destroy(&buf); |
@@ -703,7 +703,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, | |||
703 | 703 | ||
704 | rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc); | 704 | rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc); |
705 | if (!rc) | 705 | if (!rc) |
706 | *value = cmd.params.get_tpm_pt_out.value; | 706 | *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value); |
707 | 707 | ||
708 | return rc; | 708 | return rc; |
709 | } | 709 | } |
@@ -728,7 +728,7 @@ static const struct tpm_input_header tpm2_startup_header = { | |||
728 | * returned it remarks a POSIX error code. If a positive number is returned | 728 | * returned it remarks a POSIX error code. If a positive number is returned |
729 | * it remarks a TPM error. | 729 | * it remarks a TPM error. |
730 | */ | 730 | */ |
731 | int tpm2_startup(struct tpm_chip *chip, u16 startup_type) | 731 | static int tpm2_startup(struct tpm_chip *chip, u16 startup_type) |
732 | { | 732 | { |
733 | struct tpm2_cmd cmd; | 733 | struct tpm2_cmd cmd; |
734 | 734 | ||
@@ -738,7 +738,6 @@ int tpm2_startup(struct tpm_chip *chip, u16 startup_type) | |||
738 | return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), | 738 | return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), |
739 | "attempting to start the TPM"); | 739 | "attempting to start the TPM"); |
740 | } | 740 | } |
741 | EXPORT_SYMBOL_GPL(tpm2_startup); | ||
742 | 741 | ||
743 | #define TPM2_SHUTDOWN_IN_SIZE \ | 742 | #define TPM2_SHUTDOWN_IN_SIZE \ |
744 | (sizeof(struct tpm_input_header) + \ | 743 | (sizeof(struct tpm_input_header) + \ |
@@ -770,10 +769,9 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) | |||
770 | * except print the error code on a system failure. | 769 | * except print the error code on a system failure. |
771 | */ | 770 | */ |
772 | if (rc < 0) | 771 | if (rc < 0) |
773 | dev_warn(chip->pdev, "transmit returned %d while stopping the TPM", | 772 | dev_warn(&chip->dev, "transmit returned %d while stopping the TPM", |
774 | rc); | 773 | rc); |
775 | } | 774 | } |
776 | EXPORT_SYMBOL_GPL(tpm2_shutdown); | ||
777 | 775 | ||
778 | /* | 776 | /* |
779 | * tpm2_calc_ordinal_duration() - maximum duration for a command | 777 | * tpm2_calc_ordinal_duration() - maximum duration for a command |
@@ -793,7 +791,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) | |||
793 | index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST]; | 791 | index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST]; |
794 | 792 | ||
795 | if (index != TPM_UNDEFINED) | 793 | if (index != TPM_UNDEFINED) |
796 | duration = chip->vendor.duration[index]; | 794 | duration = chip->duration[index]; |
797 | 795 | ||
798 | if (duration <= 0) | 796 | if (duration <= 0) |
799 | duration = 2 * 60 * HZ; | 797 | duration = 2 * 60 * HZ; |
@@ -837,7 +835,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) | |||
837 | * immediately. This is a workaround for that. | 835 | * immediately. This is a workaround for that. |
838 | */ | 836 | */ |
839 | if (rc == TPM2_RC_TESTING) { | 837 | if (rc == TPM2_RC_TESTING) { |
840 | dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n"); | 838 | dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n"); |
841 | rc = 0; | 839 | rc = 0; |
842 | } | 840 | } |
843 | 841 | ||
@@ -855,7 +853,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) | |||
855 | * returned it remarks a POSIX error code. If a positive number is returned | 853 | * returned it remarks a POSIX error code. If a positive number is returned |
856 | * it remarks a TPM error. | 854 | * it remarks a TPM error. |
857 | */ | 855 | */ |
858 | int tpm2_do_selftest(struct tpm_chip *chip) | 856 | static int tpm2_do_selftest(struct tpm_chip *chip) |
859 | { | 857 | { |
860 | int rc; | 858 | int rc; |
861 | unsigned int loops; | 859 | unsigned int loops; |
@@ -895,7 +893,6 @@ int tpm2_do_selftest(struct tpm_chip *chip) | |||
895 | 893 | ||
896 | return rc; | 894 | return rc; |
897 | } | 895 | } |
898 | EXPORT_SYMBOL_GPL(tpm2_do_selftest); | ||
899 | 896 | ||
900 | /** | 897 | /** |
901 | * tpm2_gen_interrupt() - generate an interrupt | 898 | * tpm2_gen_interrupt() - generate an interrupt |
@@ -943,3 +940,43 @@ int tpm2_probe(struct tpm_chip *chip) | |||
943 | return 0; | 940 | return 0; |
944 | } | 941 | } |
945 | EXPORT_SYMBOL_GPL(tpm2_probe); | 942 | EXPORT_SYMBOL_GPL(tpm2_probe); |
943 | |||
944 | /** | ||
945 | * tpm2_auto_startup - Perform the standard automatic TPM initialization | ||
946 | * sequence | ||
947 | * @chip: TPM chip to use | ||
948 | * | ||
949 | * Returns 0 on success, < 0 in case of fatal error. | ||
950 | */ | ||
951 | int tpm2_auto_startup(struct tpm_chip *chip) | ||
952 | { | ||
953 | int rc; | ||
954 | |||
955 | rc = tpm_get_timeouts(chip); | ||
956 | if (rc) | ||
957 | goto out; | ||
958 | |||
959 | rc = tpm2_do_selftest(chip); | ||
960 | if (rc != 0 && rc != TPM2_RC_INITIALIZE) { | ||
961 | dev_err(&chip->dev, "TPM self test failed\n"); | ||
962 | goto out; | ||
963 | } | ||
964 | |||
965 | if (rc == TPM2_RC_INITIALIZE) { | ||
966 | rc = tpm2_startup(chip, TPM2_SU_CLEAR); | ||
967 | if (rc) | ||
968 | goto out; | ||
969 | |||
970 | rc = tpm2_do_selftest(chip); | ||
971 | if (rc) { | ||
972 | dev_err(&chip->dev, "TPM self test failed\n"); | ||
973 | goto out; | ||
974 | } | ||
975 | } | ||
976 | |||
977 | return rc; | ||
978 | out: | ||
979 | if (rc > 0) | ||
980 | rc = -ENODEV; | ||
981 | return rc; | ||
982 | } | ||
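
tpm1_auto_startup() and tpm2_auto_startup() concentrate the get-timeouts/self-test/startup sequence that drivers used to run by hand. The call site is outside these hunks, but given the TPM_OPS_AUTO_STARTUP flag the CRB driver sets below, the registration path presumably dispatches along these lines:

    /* Approximate flow only; the real hook lives in the chip
     * registration code, which is not part of the hunks shown here.
     */
    if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) {
            if (chip->flags & TPM_CHIP_FLAG_TPM2)
                    rc = tpm2_auto_startup(chip);
            else
                    rc = tpm1_auto_startup(chip);
            if (rc)
                    return rc;
    }
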
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index dfadad0916a1..0d322ab11faa 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c | |||
@@ -37,6 +37,7 @@ enum tpm_atmel_read_status { | |||
37 | 37 | ||
38 | static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 38 | static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
39 | { | 39 | { |
40 | struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); | ||
40 | u8 status, *hdr = buf; | 41 | u8 status, *hdr = buf; |
41 | u32 size; | 42 | u32 size; |
42 | int i; | 43 | int i; |
@@ -47,12 +48,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
47 | return -EIO; | 48 | return -EIO; |
48 | 49 | ||
49 | for (i = 0; i < 6; i++) { | 50 | for (i = 0; i < 6; i++) { |
50 | status = ioread8(chip->vendor.iobase + 1); | 51 | status = ioread8(priv->iobase + 1); |
51 | if ((status & ATML_STATUS_DATA_AVAIL) == 0) { | 52 | if ((status & ATML_STATUS_DATA_AVAIL) == 0) { |
52 | dev_err(chip->pdev, "error reading header\n"); | 53 | dev_err(&chip->dev, "error reading header\n"); |
53 | return -EIO; | 54 | return -EIO; |
54 | } | 55 | } |
55 | *buf++ = ioread8(chip->vendor.iobase); | 56 | *buf++ = ioread8(priv->iobase); |
56 | } | 57 | } |
57 | 58 | ||
58 | /* size of the data received */ | 59 | /* size of the data received */ |
@@ -60,12 +61,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
60 | size = be32_to_cpu(*native_size); | 61 | size = be32_to_cpu(*native_size); |
61 | 62 | ||
62 | if (count < size) { | 63 | if (count < size) { |
63 | dev_err(chip->pdev, | 64 | dev_err(&chip->dev, |
64 | "Recv size(%d) less than available space\n", size); | 65 | "Recv size(%d) less than available space\n", size); |
65 | for (; i < size; i++) { /* clear the waiting data anyway */ | 66 | for (; i < size; i++) { /* clear the waiting data anyway */ |
66 | status = ioread8(chip->vendor.iobase + 1); | 67 | status = ioread8(priv->iobase + 1); |
67 | if ((status & ATML_STATUS_DATA_AVAIL) == 0) { | 68 | if ((status & ATML_STATUS_DATA_AVAIL) == 0) { |
68 | dev_err(chip->pdev, "error reading data\n"); | 69 | dev_err(&chip->dev, "error reading data\n"); |
69 | return -EIO; | 70 | return -EIO; |
70 | } | 71 | } |
71 | } | 72 | } |
@@ -74,19 +75,19 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
74 | 75 | ||
75 | /* read all the data available */ | 76 | /* read all the data available */ |
76 | for (; i < size; i++) { | 77 | for (; i < size; i++) { |
77 | status = ioread8(chip->vendor.iobase + 1); | 78 | status = ioread8(priv->iobase + 1); |
78 | if ((status & ATML_STATUS_DATA_AVAIL) == 0) { | 79 | if ((status & ATML_STATUS_DATA_AVAIL) == 0) { |
79 | dev_err(chip->pdev, "error reading data\n"); | 80 | dev_err(&chip->dev, "error reading data\n"); |
80 | return -EIO; | 81 | return -EIO; |
81 | } | 82 | } |
82 | *buf++ = ioread8(chip->vendor.iobase); | 83 | *buf++ = ioread8(priv->iobase); |
83 | } | 84 | } |
84 | 85 | ||
85 | /* make sure data available is gone */ | 86 | /* make sure data available is gone */ |
86 | status = ioread8(chip->vendor.iobase + 1); | 87 | status = ioread8(priv->iobase + 1); |
87 | 88 | ||
88 | if (status & ATML_STATUS_DATA_AVAIL) { | 89 | if (status & ATML_STATUS_DATA_AVAIL) { |
89 | dev_err(chip->pdev, "data available is stuck\n"); | 90 | dev_err(&chip->dev, "data available is stuck\n"); |
90 | return -EIO; | 91 | return -EIO; |
91 | } | 92 | } |
92 | 93 | ||
@@ -95,12 +96,13 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
95 | 96 | ||
96 | static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) | 97 | static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) |
97 | { | 98 | { |
99 | struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); | ||
98 | int i; | 100 | int i; |
99 | 101 | ||
100 | dev_dbg(chip->pdev, "tpm_atml_send:\n"); | 102 | dev_dbg(&chip->dev, "tpm_atml_send:\n"); |
101 | for (i = 0; i < count; i++) { | 103 | for (i = 0; i < count; i++) { |
102 | dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); | 104 | dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); |
103 | iowrite8(buf[i], chip->vendor.iobase); | 105 | iowrite8(buf[i], priv->iobase); |
104 | } | 106 | } |
105 | 107 | ||
106 | return count; | 108 | return count; |
@@ -108,12 +110,16 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
108 | 110 | ||
109 | static void tpm_atml_cancel(struct tpm_chip *chip) | 111 | static void tpm_atml_cancel(struct tpm_chip *chip) |
110 | { | 112 | { |
111 | iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1); | 113 | struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); |
114 | |||
115 | iowrite8(ATML_STATUS_ABORT, priv->iobase + 1); | ||
112 | } | 116 | } |
113 | 117 | ||
114 | static u8 tpm_atml_status(struct tpm_chip *chip) | 118 | static u8 tpm_atml_status(struct tpm_chip *chip) |
115 | { | 119 | { |
116 | return ioread8(chip->vendor.iobase + 1); | 120 | struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); |
121 | |||
122 | return ioread8(priv->iobase + 1); | ||
117 | } | 123 | } |
118 | 124 | ||
119 | static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status) | 125 | static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status) |
@@ -136,13 +142,13 @@ static struct platform_device *pdev; | |||
136 | static void atml_plat_remove(void) | 142 | static void atml_plat_remove(void) |
137 | { | 143 | { |
138 | struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); | 144 | struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); |
145 | struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); | ||
139 | 146 | ||
140 | if (chip) { | 147 | if (chip) { |
141 | tpm_chip_unregister(chip); | 148 | tpm_chip_unregister(chip); |
142 | if (chip->vendor.have_region) | 149 | if (priv->have_region) |
143 | atmel_release_region(chip->vendor.base, | 150 | atmel_release_region(priv->base, priv->region_size); |
144 | chip->vendor.region_size); | 151 | atmel_put_base_addr(priv->iobase); |
145 | atmel_put_base_addr(chip->vendor.iobase); | ||
146 | platform_device_unregister(pdev); | 152 | platform_device_unregister(pdev); |
147 | } | 153 | } |
148 | } | 154 | } |
@@ -163,6 +169,7 @@ static int __init init_atmel(void) | |||
163 | int have_region, region_size; | 169 | int have_region, region_size; |
164 | unsigned long base; | 170 | unsigned long base; |
165 | struct tpm_chip *chip; | 171 | struct tpm_chip *chip; |
172 | struct tpm_atmel_priv *priv; | ||
166 | 173 | ||
167 | rc = platform_driver_register(&atml_drv); | 174 | rc = platform_driver_register(&atml_drv); |
168 | if (rc) | 175 | if (rc) |
@@ -183,16 +190,24 @@ static int __init init_atmel(void) | |||
183 | goto err_rel_reg; | 190 | goto err_rel_reg; |
184 | } | 191 | } |
185 | 192 | ||
193 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | ||
194 | if (!priv) { | ||
195 | rc = -ENOMEM; | ||
196 | goto err_unreg_dev; | ||
197 | } | ||
198 | |||
199 | priv->iobase = iobase; | ||
200 | priv->base = base; | ||
201 | priv->have_region = have_region; | ||
202 | priv->region_size = region_size; | ||
203 | |||
186 | chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel); | 204 | chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel); |
187 | if (IS_ERR(chip)) { | 205 | if (IS_ERR(chip)) { |
188 | rc = PTR_ERR(chip); | 206 | rc = PTR_ERR(chip); |
189 | goto err_unreg_dev; | 207 | goto err_unreg_dev; |
190 | } | 208 | } |
191 | 209 | ||
192 | chip->vendor.iobase = iobase; | 210 | dev_set_drvdata(&chip->dev, priv); |
193 | chip->vendor.base = base; | ||
194 | chip->vendor.have_region = have_region; | ||
195 | chip->vendor.region_size = region_size; | ||
196 | 211 | ||
197 | rc = tpm_chip_register(chip); | 212 | rc = tpm_chip_register(chip); |
198 | if (rc) | 213 | if (rc) |
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h index 6c831f9466b7..4f96d80cdce9 100644 --- a/drivers/char/tpm/tpm_atmel.h +++ b/drivers/char/tpm/tpm_atmel.h | |||
@@ -22,12 +22,19 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | struct tpm_atmel_priv { | ||
26 | int region_size; | ||
27 | int have_region; | ||
28 | unsigned long base; | ||
29 | void __iomem *iobase; | ||
30 | }; | ||
31 | |||
25 | #ifdef CONFIG_PPC64 | 32 | #ifdef CONFIG_PPC64 |
26 | 33 | ||
27 | #include <asm/prom.h> | 34 | #include <asm/prom.h> |
28 | 35 | ||
29 | #define atmel_getb(chip, offset) readb(chip->vendor->iobase + offset); | 36 | #define atmel_getb(priv, offset) readb(priv->iobase + offset) |
30 | #define atmel_putb(val, chip, offset) writeb(val, chip->vendor->iobase + offset) | 37 | #define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset) |
31 | #define atmel_request_region request_mem_region | 38 | #define atmel_request_region request_mem_region |
32 | #define atmel_release_region release_mem_region | 39 | #define atmel_release_region release_mem_region |
33 | 40 | ||
@@ -78,8 +85,9 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) | |||
78 | return ioremap(*base, *region_size); | 85 | return ioremap(*base, *region_size); |
79 | } | 86 | } |
80 | #else | 87 | #else |
81 | #define atmel_getb(chip, offset) inb(chip->vendor->base + offset) | 88 | #define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset) |
82 | #define atmel_putb(val, chip, offset) outb(val, chip->vendor->base + offset) | 89 | #define atmel_putb(val, chip, offset) \ |
90 | outb(val, atmel_get_priv(chip)->base + offset) | ||
83 | #define atmel_request_region request_region | 91 | #define atmel_request_region request_region |
84 | #define atmel_release_region release_region | 92 | #define atmel_release_region release_region |
85 | /* Atmel definitions */ | 93 | /* Atmel definitions */ |
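
The rewritten atmel_getb()/atmel_putb() macros reach the base address through an atmel_get_priv() helper that falls outside the lines shown here. Given the dev_set_drvdata(&chip->dev, priv) call in tpm_atmel.c above, it presumably reduces to:

    /* Assumed shape of the helper the new macros rely on; inferred from
     * the drvdata convention used elsewhere in this series.
     */
    static inline struct tpm_atmel_priv *atmel_get_priv(struct tpm_chip *chip)
    {
            return dev_get_drvdata(&chip->dev);
    }
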
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a12b31940344..018c382554ba 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c | |||
@@ -77,7 +77,6 @@ enum crb_flags { | |||
77 | 77 | ||
78 | struct crb_priv { | 78 | struct crb_priv { |
79 | unsigned int flags; | 79 | unsigned int flags; |
80 | struct resource res; | ||
81 | void __iomem *iobase; | 80 | void __iomem *iobase; |
82 | struct crb_control_area __iomem *cca; | 81 | struct crb_control_area __iomem *cca; |
83 | u8 __iomem *cmd; | 82 | u8 __iomem *cmd; |
@@ -88,7 +87,7 @@ static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume); | |||
88 | 87 | ||
89 | static u8 crb_status(struct tpm_chip *chip) | 88 | static u8 crb_status(struct tpm_chip *chip) |
90 | { | 89 | { |
91 | struct crb_priv *priv = chip->vendor.priv; | 90 | struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
92 | u8 sts = 0; | 91 | u8 sts = 0; |
93 | 92 | ||
94 | if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) != | 93 | if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) != |
@@ -100,7 +99,7 @@ static u8 crb_status(struct tpm_chip *chip) | |||
100 | 99 | ||
101 | static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 100 | static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
102 | { | 101 | { |
103 | struct crb_priv *priv = chip->vendor.priv; | 102 | struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
104 | unsigned int expected; | 103 | unsigned int expected; |
105 | 104 | ||
106 | /* sanity check */ | 105 | /* sanity check */ |
@@ -140,7 +139,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip) | |||
140 | 139 | ||
141 | static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) | 140 | static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) |
142 | { | 141 | { |
143 | struct crb_priv *priv = chip->vendor.priv; | 142 | struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
144 | int rc = 0; | 143 | int rc = 0; |
145 | 144 | ||
146 | if (len > ioread32(&priv->cca->cmd_size)) { | 145 | if (len > ioread32(&priv->cca->cmd_size)) { |
@@ -167,7 +166,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
167 | 166 | ||
168 | static void crb_cancel(struct tpm_chip *chip) | 167 | static void crb_cancel(struct tpm_chip *chip) |
169 | { | 168 | { |
170 | struct crb_priv *priv = chip->vendor.priv; | 169 | struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
171 | 170 | ||
172 | iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel); | 171 | iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel); |
173 | 172 | ||
@@ -182,13 +181,14 @@ static void crb_cancel(struct tpm_chip *chip) | |||
182 | 181 | ||
183 | static bool crb_req_canceled(struct tpm_chip *chip, u8 status) | 182 | static bool crb_req_canceled(struct tpm_chip *chip, u8 status) |
184 | { | 183 | { |
185 | struct crb_priv *priv = chip->vendor.priv; | 184 | struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
186 | u32 cancel = ioread32(&priv->cca->cancel); | 185 | u32 cancel = ioread32(&priv->cca->cancel); |
187 | 186 | ||
188 | return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; | 187 | return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; |
189 | } | 188 | } |
190 | 189 | ||
191 | static const struct tpm_class_ops tpm_crb = { | 190 | static const struct tpm_class_ops tpm_crb = { |
191 | .flags = TPM_OPS_AUTO_STARTUP, | ||
192 | .status = crb_status, | 192 | .status = crb_status, |
193 | .recv = crb_recv, | 193 | .recv = crb_recv, |
194 | .send = crb_send, | 194 | .send = crb_send, |
@@ -201,42 +201,33 @@ static const struct tpm_class_ops tpm_crb = { | |||
201 | static int crb_init(struct acpi_device *device, struct crb_priv *priv) | 201 | static int crb_init(struct acpi_device *device, struct crb_priv *priv) |
202 | { | 202 | { |
203 | struct tpm_chip *chip; | 203 | struct tpm_chip *chip; |
204 | int rc; | ||
205 | 204 | ||
206 | chip = tpmm_chip_alloc(&device->dev, &tpm_crb); | 205 | chip = tpmm_chip_alloc(&device->dev, &tpm_crb); |
207 | if (IS_ERR(chip)) | 206 | if (IS_ERR(chip)) |
208 | return PTR_ERR(chip); | 207 | return PTR_ERR(chip); |
209 | 208 | ||
210 | chip->vendor.priv = priv; | 209 | dev_set_drvdata(&chip->dev, priv); |
211 | chip->acpi_dev_handle = device->handle; | 210 | chip->acpi_dev_handle = device->handle; |
212 | chip->flags = TPM_CHIP_FLAG_TPM2; | 211 | chip->flags = TPM_CHIP_FLAG_TPM2; |
213 | 212 | ||
214 | rc = tpm_get_timeouts(chip); | ||
215 | if (rc) | ||
216 | return rc; | ||
217 | |||
218 | rc = tpm2_do_selftest(chip); | ||
219 | if (rc) | ||
220 | return rc; | ||
221 | |||
222 | return tpm_chip_register(chip); | 213 | return tpm_chip_register(chip); |
223 | } | 214 | } |
224 | 215 | ||
225 | static int crb_check_resource(struct acpi_resource *ares, void *data) | 216 | static int crb_check_resource(struct acpi_resource *ares, void *data) |
226 | { | 217 | { |
227 | struct crb_priv *priv = data; | 218 | struct resource *io_res = data; |
228 | struct resource res; | 219 | struct resource res; |
229 | 220 | ||
230 | if (acpi_dev_resource_memory(ares, &res)) { | 221 | if (acpi_dev_resource_memory(ares, &res)) { |
231 | priv->res = res; | 222 | *io_res = res; |
232 | priv->res.name = NULL; | 223 | io_res->name = NULL; |
233 | } | 224 | } |
234 | 225 | ||
235 | return 1; | 226 | return 1; |
236 | } | 227 | } |
237 | 228 | ||
238 | static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, | 229 | static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, |
239 | u64 start, u32 size) | 230 | struct resource *io_res, u64 start, u32 size) |
240 | { | 231 | { |
241 | struct resource new_res = { | 232 | struct resource new_res = { |
242 | .start = start, | 233 | .start = start, |
@@ -246,53 +237,74 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, | |||
246 | 237 | ||
247 | /* Detect a 64 bit address on a 32 bit system */ | 238 | /* Detect a 64 bit address on a 32 bit system */ |
248 | if (start != new_res.start) | 239 | if (start != new_res.start) |
249 | return ERR_PTR(-EINVAL); | 240 | return (void __iomem *) ERR_PTR(-EINVAL); |
250 | 241 | ||
251 | if (!resource_contains(&priv->res, &new_res)) | 242 | if (!resource_contains(io_res, &new_res)) |
252 | return devm_ioremap_resource(dev, &new_res); | 243 | return devm_ioremap_resource(dev, &new_res); |
253 | 244 | ||
254 | return priv->iobase + (new_res.start - priv->res.start); | 245 | return priv->iobase + (new_res.start - io_res->start); |
255 | } | 246 | } |
256 | 247 | ||
257 | static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, | 248 | static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, |
258 | struct acpi_table_tpm2 *buf) | 249 | struct acpi_table_tpm2 *buf) |
259 | { | 250 | { |
260 | struct list_head resources; | 251 | struct list_head resources; |
252 | struct resource io_res; | ||
261 | struct device *dev = &device->dev; | 253 | struct device *dev = &device->dev; |
262 | u64 pa; | 254 | u64 cmd_pa; |
255 | u32 cmd_size; | ||
256 | u64 rsp_pa; | ||
257 | u32 rsp_size; | ||
263 | int ret; | 258 | int ret; |
264 | 259 | ||
265 | INIT_LIST_HEAD(&resources); | 260 | INIT_LIST_HEAD(&resources); |
266 | ret = acpi_dev_get_resources(device, &resources, crb_check_resource, | 261 | ret = acpi_dev_get_resources(device, &resources, crb_check_resource, |
267 | priv); | 262 | &io_res); |
268 | if (ret < 0) | 263 | if (ret < 0) |
269 | return ret; | 264 | return ret; |
270 | acpi_dev_free_resource_list(&resources); | 265 | acpi_dev_free_resource_list(&resources); |
271 | 266 | ||
272 | if (resource_type(&priv->res) != IORESOURCE_MEM) { | 267 | if (resource_type(&io_res) != IORESOURCE_MEM) { |
273 | dev_err(dev, | 268 | dev_err(dev, |
274 | FW_BUG "TPM2 ACPI table does not define a memory resource\n"); | 269 | FW_BUG "TPM2 ACPI table does not define a memory resource\n"); |
275 | return -EINVAL; | 270 | return -EINVAL; |
276 | } | 271 | } |
277 | 272 | ||
278 | priv->iobase = devm_ioremap_resource(dev, &priv->res); | 273 | priv->iobase = devm_ioremap_resource(dev, &io_res); |
279 | if (IS_ERR(priv->iobase)) | 274 | if (IS_ERR(priv->iobase)) |
280 | return PTR_ERR(priv->iobase); | 275 | return PTR_ERR(priv->iobase); |
281 | 276 | ||
282 | priv->cca = crb_map_res(dev, priv, buf->control_address, 0x1000); | 277 | priv->cca = crb_map_res(dev, priv, &io_res, buf->control_address, |
278 | sizeof(struct crb_control_area)); | ||
283 | if (IS_ERR(priv->cca)) | 279 | if (IS_ERR(priv->cca)) |
284 | return PTR_ERR(priv->cca); | 280 | return PTR_ERR(priv->cca); |
285 | 281 | ||
286 | pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) | | 282 | cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) | |
287 | (u64) ioread32(&priv->cca->cmd_pa_low); | 283 | (u64) ioread32(&priv->cca->cmd_pa_low); |
288 | priv->cmd = crb_map_res(dev, priv, pa, ioread32(&priv->cca->cmd_size)); | 284 | cmd_size = ioread32(&priv->cca->cmd_size); |
285 | priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); | ||
289 | if (IS_ERR(priv->cmd)) | 286 | if (IS_ERR(priv->cmd)) |
290 | return PTR_ERR(priv->cmd); | 287 | return PTR_ERR(priv->cmd); |
291 | 288 | ||
292 | memcpy_fromio(&pa, &priv->cca->rsp_pa, 8); | 289 | memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8); |
293 | pa = le64_to_cpu(pa); | 290 | rsp_pa = le64_to_cpu(rsp_pa); |
294 | priv->rsp = crb_map_res(dev, priv, pa, ioread32(&priv->cca->rsp_size)); | 291 | rsp_size = ioread32(&priv->cca->rsp_size); |
295 | return PTR_ERR_OR_ZERO(priv->rsp); | 292 | |
293 | if (cmd_pa != rsp_pa) { | ||
294 | priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); | ||
295 | return PTR_ERR_OR_ZERO(priv->rsp); | ||
296 | } | ||
297 | |||
298 | /* According to the PTP specification, overlapping command and response | ||
299 | * buffer sizes must be identical. | ||
300 | */ | ||
301 | if (cmd_size != rsp_size) { | ||
302 | dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical"); | ||
303 | return -EINVAL; | ||
304 | } | ||
305 | |||
306 | priv->rsp = priv->cmd; | ||
307 | return 0; | ||
296 | } | 308 | } |
297 | 309 | ||
298 | static int crb_acpi_add(struct acpi_device *device) | 310 | static int crb_acpi_add(struct acpi_device *device) |
@@ -344,9 +356,6 @@ static int crb_acpi_remove(struct acpi_device *device) | |||
344 | struct device *dev = &device->dev; | 356 | struct device *dev = &device->dev; |
345 | struct tpm_chip *chip = dev_get_drvdata(dev); | 357 | struct tpm_chip *chip = dev_get_drvdata(dev); |
346 | 358 | ||
347 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
348 | tpm2_shutdown(chip, TPM2_SU_CLEAR); | ||
349 | |||
350 | tpm_chip_unregister(chip); | 359 | tpm_chip_unregister(chip); |
351 | 360 | ||
352 | return 0; | 361 | return 0; |
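Note on the tpm_crb.c hunks above: crb_map_io() now threads the ACPI memory window through a local struct resource io_res instead of keeping it in struct crb_priv, and it copes with firmware that places the command and response buffers at the same address. Condensed into a single helper, the new mapping decision is roughly the sketch below; this is illustrative, not code from the commit, and it assumes crb_map_res(), struct crb_priv and the ERR_PTR/FW_BUG helpers exactly as they appear in the hunks (crb_map_cmd_rsp is a hypothetical name).

/* Sketch only: mirrors the cmd/rsp mapping logic added to crb_map_io(). */
static int crb_map_cmd_rsp(struct device *dev, struct crb_priv *priv,
			   struct resource *io_res,
			   u64 cmd_pa, u32 cmd_size, u64 rsp_pa, u32 rsp_size)
{
	priv->cmd = crb_map_res(dev, priv, io_res, cmd_pa, cmd_size);
	if (IS_ERR(priv->cmd))
		return PTR_ERR(priv->cmd);

	/* Distinct buffers: map the response area on its own. */
	if (cmd_pa != rsp_pa) {
		priv->rsp = crb_map_res(dev, priv, io_res, rsp_pa, rsp_size);
		return PTR_ERR_OR_ZERO(priv->rsp);
	}

	/* Overlapping buffers must have identical sizes per the PTP spec. */
	if (cmd_size != rsp_size) {
		dev_err(dev, FW_BUG "overlapping cmd/rsp buffer sizes differ\n");
		return -EINVAL;
	}

	/* One mapping serves both directions. */
	priv->rsp = priv->cmd;
	return 0;
}

The dropped tpm2_shutdown() call in crb_acpi_remove() suggests the TPM2 shutdown is now issued from the common unregister path rather than per driver; the hunk itself only shows the driver-side removal.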
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c index 4e6940acf639..e7228863290e 100644 --- a/drivers/char/tpm/tpm_eventlog.c +++ b/drivers/char/tpm/tpm_eventlog.c | |||
@@ -403,7 +403,7 @@ static int is_bad(void *p) | |||
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
405 | 405 | ||
406 | struct dentry **tpm_bios_log_setup(char *name) | 406 | struct dentry **tpm_bios_log_setup(const char *name) |
407 | { | 407 | { |
408 | struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file; | 408 | struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file; |
409 | 409 | ||
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h index 267bfbd1b7bb..8de62b09be51 100644 --- a/drivers/char/tpm/tpm_eventlog.h +++ b/drivers/char/tpm/tpm_eventlog.h | |||
@@ -77,10 +77,10 @@ int read_log(struct tpm_bios_log *log); | |||
77 | 77 | ||
78 | #if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \ | 78 | #if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \ |
79 | defined(CONFIG_ACPI) | 79 | defined(CONFIG_ACPI) |
80 | extern struct dentry **tpm_bios_log_setup(char *); | 80 | extern struct dentry **tpm_bios_log_setup(const char *); |
81 | extern void tpm_bios_log_teardown(struct dentry **); | 81 | extern void tpm_bios_log_teardown(struct dentry **); |
82 | #else | 82 | #else |
83 | static inline struct dentry **tpm_bios_log_setup(char *name) | 83 | static inline struct dentry **tpm_bios_log_setup(const char *name) |
84 | { | 84 | { |
85 | return NULL; | 85 | return NULL; |
86 | } | 86 | } |
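The tpm_bios_log_setup() constification above is small, but it lets callers hand in read-only strings without casting. A one-line illustration follows; the caller is hypothetical and not from the commit, and it assumes only the prototype from the hunk plus dev_name(), which returns a const char *.

/* Hypothetical caller: dev_name() returns const char *, which now matches
 * the tpm_bios_log_setup() prototype directly. */
static struct dentry **my_log_setup(struct tpm_chip *chip)
{
	return tpm_bios_log_setup(dev_name(&chip->dev));
}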
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 8dfb88b9739c..95ce2e9ccdc6 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c | |||
@@ -51,8 +51,8 @@ struct priv_data { | |||
51 | 51 | ||
52 | static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) | 52 | static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) |
53 | { | 53 | { |
54 | struct priv_data *priv = chip->vendor.priv; | 54 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
55 | struct i2c_client *client = to_i2c_client(chip->pdev); | 55 | struct i2c_client *client = to_i2c_client(chip->dev.parent); |
56 | s32 status; | 56 | s32 status; |
57 | 57 | ||
58 | priv->len = 0; | 58 | priv->len = 0; |
@@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
62 | 62 | ||
63 | status = i2c_master_send(client, buf, len); | 63 | status = i2c_master_send(client, buf, len); |
64 | 64 | ||
65 | dev_dbg(chip->pdev, | 65 | dev_dbg(&chip->dev, |
66 | "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, | 66 | "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, |
67 | (int)min_t(size_t, 64, len), buf, len, status); | 67 | (int)min_t(size_t, 64, len), buf, len, status); |
68 | return status; | 68 | return status; |
@@ -70,8 +70,8 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
70 | 70 | ||
71 | static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 71 | static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
72 | { | 72 | { |
73 | struct priv_data *priv = chip->vendor.priv; | 73 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
74 | struct i2c_client *client = to_i2c_client(chip->pdev); | 74 | struct i2c_client *client = to_i2c_client(chip->dev.parent); |
75 | struct tpm_output_header *hdr = | 75 | struct tpm_output_header *hdr = |
76 | (struct tpm_output_header *)priv->buffer; | 76 | (struct tpm_output_header *)priv->buffer; |
77 | u32 expected_len; | 77 | u32 expected_len; |
@@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
88 | return -ENOMEM; | 88 | return -ENOMEM; |
89 | 89 | ||
90 | if (priv->len >= expected_len) { | 90 | if (priv->len >= expected_len) { |
91 | dev_dbg(chip->pdev, | 91 | dev_dbg(&chip->dev, |
92 | "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, | 92 | "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, |
93 | (int)min_t(size_t, 64, expected_len), buf, count, | 93 | (int)min_t(size_t, 64, expected_len), buf, count, |
94 | expected_len); | 94 | expected_len); |
@@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | rc = i2c_master_recv(client, buf, expected_len); | 99 | rc = i2c_master_recv(client, buf, expected_len); |
100 | dev_dbg(chip->pdev, | 100 | dev_dbg(&chip->dev, |
101 | "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, | 101 | "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, |
102 | (int)min_t(size_t, 64, expected_len), buf, count, | 102 | (int)min_t(size_t, 64, expected_len), buf, count, |
103 | expected_len); | 103 | expected_len); |
@@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
106 | 106 | ||
107 | static void i2c_atmel_cancel(struct tpm_chip *chip) | 107 | static void i2c_atmel_cancel(struct tpm_chip *chip) |
108 | { | 108 | { |
109 | dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported"); | 109 | dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported"); |
110 | } | 110 | } |
111 | 111 | ||
112 | static u8 i2c_atmel_read_status(struct tpm_chip *chip) | 112 | static u8 i2c_atmel_read_status(struct tpm_chip *chip) |
113 | { | 113 | { |
114 | struct priv_data *priv = chip->vendor.priv; | 114 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
115 | struct i2c_client *client = to_i2c_client(chip->pdev); | 115 | struct i2c_client *client = to_i2c_client(chip->dev.parent); |
116 | int rc; | 116 | int rc; |
117 | 117 | ||
118 | /* The TPM fails the I2C read until it is ready, so we do the entire | 118 | /* The TPM fails the I2C read until it is ready, so we do the entire |
@@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip) | |||
125 | /* Once the TPM has completed the command the command remains readable | 125 | /* Once the TPM has completed the command the command remains readable |
126 | * until another command is issued. */ | 126 | * until another command is issued. */ |
127 | rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); | 127 | rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); |
128 | dev_dbg(chip->pdev, | 128 | dev_dbg(&chip->dev, |
129 | "%s: sts=%d", __func__, rc); | 129 | "%s: sts=%d", __func__, rc); |
130 | if (rc <= 0) | 130 | if (rc <= 0) |
131 | return 0; | 131 | return 0; |
@@ -141,6 +141,7 @@ static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | static const struct tpm_class_ops i2c_atmel = { | 143 | static const struct tpm_class_ops i2c_atmel = { |
144 | .flags = TPM_OPS_AUTO_STARTUP, | ||
144 | .status = i2c_atmel_read_status, | 145 | .status = i2c_atmel_read_status, |
145 | .recv = i2c_atmel_recv, | 146 | .recv = i2c_atmel_recv, |
146 | .send = i2c_atmel_send, | 147 | .send = i2c_atmel_send, |
@@ -155,6 +156,7 @@ static int i2c_atmel_probe(struct i2c_client *client, | |||
155 | { | 156 | { |
156 | struct tpm_chip *chip; | 157 | struct tpm_chip *chip; |
157 | struct device *dev = &client->dev; | 158 | struct device *dev = &client->dev; |
159 | struct priv_data *priv; | ||
158 | 160 | ||
159 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) | 161 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) |
160 | return -ENODEV; | 162 | return -ENODEV; |
@@ -163,26 +165,21 @@ static int i2c_atmel_probe(struct i2c_client *client, | |||
163 | if (IS_ERR(chip)) | 165 | if (IS_ERR(chip)) |
164 | return PTR_ERR(chip); | 166 | return PTR_ERR(chip); |
165 | 167 | ||
166 | chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), | 168 | priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); |
167 | GFP_KERNEL); | 169 | if (!priv) |
168 | if (!chip->vendor.priv) | ||
169 | return -ENOMEM; | 170 | return -ENOMEM; |
170 | 171 | ||
171 | /* Default timeouts */ | 172 | /* Default timeouts */ |
172 | chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); | 173 | chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); |
173 | chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); | 174 | chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); |
174 | chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); | 175 | chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); |
175 | chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); | 176 | chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); |
176 | chip->vendor.irq = 0; | 177 | |
178 | dev_set_drvdata(&chip->dev, priv); | ||
177 | 179 | ||
178 | /* There is no known way to probe for this device, and all version | 180 | /* There is no known way to probe for this device, and all version |
179 | * information seems to be read via TPM commands. Thus we rely on the | 181 | * information seems to be read via TPM commands. Thus we rely on the |
180 | * TPM startup process in the common code to detect the device. */ | 182 | * TPM startup process in the common code to detect the device. */ |
181 | if (tpm_get_timeouts(chip)) | ||
182 | return -ENODEV; | ||
183 | |||
184 | if (tpm_do_selftest(chip)) | ||
185 | return -ENODEV; | ||
186 | 183 | ||
187 | return tpm_chip_register(chip); | 184 | return tpm_chip_register(chip); |
188 | } | 185 | } |
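The Atmel hunks above show the pattern repeated across the I2C drivers in this pull: per-driver state moves from chip->vendor.priv to driver data on the chip's own device, the parent device is reached through chip->dev.parent rather than chip->pdev, and the manual tpm_get_timeouts()/tpm_do_selftest() calls are dropped in favour of TPM_OPS_AUTO_STARTUP in the ops table. A stripped-down sketch of that shape follows; my_priv, my_tpm_ops, my_probe, my_priv_of and MY_SHORT_TIMEOUT_MS are illustrative names, not identifiers from the commit.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include "tpm.h"	/* tpmm_chip_alloc(), tpm_chip_register(), TPM_OPS_AUTO_STARTUP */

#define MY_SHORT_TIMEOUT_MS	750	/* illustrative value */

struct my_priv {
	size_t len;
};

static const struct tpm_class_ops my_tpm_ops = {
	.flags = TPM_OPS_AUTO_STARTUP,
	/* .status, .recv, .send, .cancel, ... elided in this sketch */
};

static int my_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct tpm_chip *chip;
	struct my_priv *priv;

	chip = tpmm_chip_alloc(dev, &my_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	chip->timeout_a = msecs_to_jiffies(MY_SHORT_TIMEOUT_MS);
	dev_set_drvdata(&chip->dev, priv);	/* replaces chip->vendor.priv */

	/* No tpm_get_timeouts()/tpm_do_selftest() here: with
	 * TPM_OPS_AUTO_STARTUP set, the core runs the startup sequence
	 * from tpm_chip_register(). */
	return tpm_chip_register(chip);
}

static struct my_priv *my_priv_of(struct tpm_chip *chip)
{
	/* In .send/.recv/.status the private data comes back the same way,
	 * and the i2c_client is to_i2c_client(chip->dev.parent). */
	return dev_get_drvdata(&chip->dev);
}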
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 63d5d22e9e60..62ee44e57ddc 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c | |||
@@ -66,6 +66,7 @@ enum i2c_chip_type { | |||
66 | /* Structure to store I2C TPM specific stuff */ | 66 | /* Structure to store I2C TPM specific stuff */ |
67 | struct tpm_inf_dev { | 67 | struct tpm_inf_dev { |
68 | struct i2c_client *client; | 68 | struct i2c_client *client; |
69 | int locality; | ||
69 | u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */ | 70 | u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */ |
70 | struct tpm_chip *chip; | 71 | struct tpm_chip *chip; |
71 | enum i2c_chip_type chip_type; | 72 | enum i2c_chip_type chip_type; |
@@ -288,7 +289,7 @@ static int check_locality(struct tpm_chip *chip, int loc) | |||
288 | 289 | ||
289 | if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == | 290 | if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == |
290 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { | 291 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { |
291 | chip->vendor.locality = loc; | 292 | tpm_dev.locality = loc; |
292 | return loc; | 293 | return loc; |
293 | } | 294 | } |
294 | 295 | ||
@@ -320,7 +321,7 @@ static int request_locality(struct tpm_chip *chip, int loc) | |||
320 | iic_tpm_write(TPM_ACCESS(loc), &buf, 1); | 321 | iic_tpm_write(TPM_ACCESS(loc), &buf, 1); |
321 | 322 | ||
322 | /* wait for burstcount */ | 323 | /* wait for burstcount */ |
323 | stop = jiffies + chip->vendor.timeout_a; | 324 | stop = jiffies + chip->timeout_a; |
324 | do { | 325 | do { |
325 | if (check_locality(chip, loc) >= 0) | 326 | if (check_locality(chip, loc) >= 0) |
326 | return loc; | 327 | return loc; |
@@ -337,7 +338,7 @@ static u8 tpm_tis_i2c_status(struct tpm_chip *chip) | |||
337 | u8 i = 0; | 338 | u8 i = 0; |
338 | 339 | ||
339 | do { | 340 | do { |
340 | if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0) | 341 | if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0) |
341 | return 0; | 342 | return 0; |
342 | 343 | ||
343 | i++; | 344 | i++; |
@@ -351,7 +352,7 @@ static void tpm_tis_i2c_ready(struct tpm_chip *chip) | |||
351 | { | 352 | { |
352 | /* this causes the current command to be aborted */ | 353 | /* this causes the current command to be aborted */ |
353 | u8 buf = TPM_STS_COMMAND_READY; | 354 | u8 buf = TPM_STS_COMMAND_READY; |
354 | iic_tpm_write_long(TPM_STS(chip->vendor.locality), &buf, 1); | 355 | iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1); |
355 | } | 356 | } |
356 | 357 | ||
357 | static ssize_t get_burstcount(struct tpm_chip *chip) | 358 | static ssize_t get_burstcount(struct tpm_chip *chip) |
@@ -362,10 +363,10 @@ static ssize_t get_burstcount(struct tpm_chip *chip) | |||
362 | 363 | ||
363 | /* wait for burstcount */ | 364 | /* wait for burstcount */ |
364 | /* which timeout value, spec has 2 answers (c & d) */ | 365 | /* which timeout value, spec has 2 answers (c & d) */ |
365 | stop = jiffies + chip->vendor.timeout_d; | 366 | stop = jiffies + chip->timeout_d; |
366 | do { | 367 | do { |
367 | /* Note: STS is little endian */ | 368 | /* Note: STS is little endian */ |
368 | if (iic_tpm_read(TPM_STS(chip->vendor.locality)+1, buf, 3) < 0) | 369 | if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0) |
369 | burstcnt = 0; | 370 | burstcnt = 0; |
370 | else | 371 | else |
371 | burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0]; | 372 | burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0]; |
@@ -419,7 +420,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | |||
419 | if (burstcnt > (count - size)) | 420 | if (burstcnt > (count - size)) |
420 | burstcnt = count - size; | 421 | burstcnt = count - size; |
421 | 422 | ||
422 | rc = iic_tpm_read(TPM_DATA_FIFO(chip->vendor.locality), | 423 | rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality), |
423 | &(buf[size]), burstcnt); | 424 | &(buf[size]), burstcnt); |
424 | if (rc == 0) | 425 | if (rc == 0) |
425 | size += burstcnt; | 426 | size += burstcnt; |
@@ -446,7 +447,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
446 | /* read first 10 bytes, including tag, paramsize, and result */ | 447 | /* read first 10 bytes, including tag, paramsize, and result */ |
447 | size = recv_data(chip, buf, TPM_HEADER_SIZE); | 448 | size = recv_data(chip, buf, TPM_HEADER_SIZE); |
448 | if (size < TPM_HEADER_SIZE) { | 449 | if (size < TPM_HEADER_SIZE) { |
449 | dev_err(chip->pdev, "Unable to read header\n"); | 450 | dev_err(&chip->dev, "Unable to read header\n"); |
450 | goto out; | 451 | goto out; |
451 | } | 452 | } |
452 | 453 | ||
@@ -459,14 +460,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
459 | size += recv_data(chip, &buf[TPM_HEADER_SIZE], | 460 | size += recv_data(chip, &buf[TPM_HEADER_SIZE], |
460 | expected - TPM_HEADER_SIZE); | 461 | expected - TPM_HEADER_SIZE); |
461 | if (size < expected) { | 462 | if (size < expected) { |
462 | dev_err(chip->pdev, "Unable to read remainder of result\n"); | 463 | dev_err(&chip->dev, "Unable to read remainder of result\n"); |
463 | size = -ETIME; | 464 | size = -ETIME; |
464 | goto out; | 465 | goto out; |
465 | } | 466 | } |
466 | 467 | ||
467 | wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); | 468 | wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); |
468 | if (status & TPM_STS_DATA_AVAIL) { /* retry? */ | 469 | if (status & TPM_STS_DATA_AVAIL) { /* retry? */ |
469 | dev_err(chip->pdev, "Error left over data\n"); | 470 | dev_err(&chip->dev, "Error left over data\n"); |
470 | size = -EIO; | 471 | size = -EIO; |
471 | goto out; | 472 | goto out; |
472 | } | 473 | } |
@@ -477,7 +478,7 @@ out: | |||
477 | * so we sleep rather than keeping the bus busy | 478 | * so we sleep rather than keeping the bus busy |
478 | */ | 479 | */ |
479 | usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); | 480 | usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); |
480 | release_locality(chip, chip->vendor.locality, 0); | 481 | release_locality(chip, tpm_dev.locality, 0); |
481 | return size; | 482 | return size; |
482 | } | 483 | } |
483 | 484 | ||
@@ -500,7 +501,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
500 | tpm_tis_i2c_ready(chip); | 501 | tpm_tis_i2c_ready(chip); |
501 | if (wait_for_stat | 502 | if (wait_for_stat |
502 | (chip, TPM_STS_COMMAND_READY, | 503 | (chip, TPM_STS_COMMAND_READY, |
503 | chip->vendor.timeout_b, &status) < 0) { | 504 | chip->timeout_b, &status) < 0) { |
504 | rc = -ETIME; | 505 | rc = -ETIME; |
505 | goto out_err; | 506 | goto out_err; |
506 | } | 507 | } |
@@ -516,7 +517,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
516 | if (burstcnt > (len - 1 - count)) | 517 | if (burstcnt > (len - 1 - count)) |
517 | burstcnt = len - 1 - count; | 518 | burstcnt = len - 1 - count; |
518 | 519 | ||
519 | rc = iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), | 520 | rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), |
520 | &(buf[count]), burstcnt); | 521 | &(buf[count]), burstcnt); |
521 | if (rc == 0) | 522 | if (rc == 0) |
522 | count += burstcnt; | 523 | count += burstcnt; |
@@ -530,7 +531,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
530 | } | 531 | } |
531 | 532 | ||
532 | wait_for_stat(chip, TPM_STS_VALID, | 533 | wait_for_stat(chip, TPM_STS_VALID, |
533 | chip->vendor.timeout_c, &status); | 534 | chip->timeout_c, &status); |
534 | 535 | ||
535 | if ((status & TPM_STS_DATA_EXPECT) == 0) { | 536 | if ((status & TPM_STS_DATA_EXPECT) == 0) { |
536 | rc = -EIO; | 537 | rc = -EIO; |
@@ -539,15 +540,15 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
539 | } | 540 | } |
540 | 541 | ||
541 | /* write last byte */ | 542 | /* write last byte */ |
542 | iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), &(buf[count]), 1); | 543 | iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1); |
543 | wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); | 544 | wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); |
544 | if ((status & TPM_STS_DATA_EXPECT) != 0) { | 545 | if ((status & TPM_STS_DATA_EXPECT) != 0) { |
545 | rc = -EIO; | 546 | rc = -EIO; |
546 | goto out_err; | 547 | goto out_err; |
547 | } | 548 | } |
548 | 549 | ||
549 | /* go and do it */ | 550 | /* go and do it */ |
550 | iic_tpm_write(TPM_STS(chip->vendor.locality), &sts, 1); | 551 | iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1); |
551 | 552 | ||
552 | return len; | 553 | return len; |
553 | out_err: | 554 | out_err: |
@@ -556,7 +557,7 @@ out_err: | |||
556 | * so we sleep rather than keeping the bus busy | 557 | * so we sleep rather than keeping the bus busy |
557 | */ | 558 | */ |
558 | usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); | 559 | usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); |
559 | release_locality(chip, chip->vendor.locality, 0); | 560 | release_locality(chip, tpm_dev.locality, 0); |
560 | return rc; | 561 | return rc; |
561 | } | 562 | } |
562 | 563 | ||
@@ -566,6 +567,7 @@ static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status) | |||
566 | } | 567 | } |
567 | 568 | ||
568 | static const struct tpm_class_ops tpm_tis_i2c = { | 569 | static const struct tpm_class_ops tpm_tis_i2c = { |
570 | .flags = TPM_OPS_AUTO_STARTUP, | ||
569 | .status = tpm_tis_i2c_status, | 571 | .status = tpm_tis_i2c_status, |
570 | .recv = tpm_tis_i2c_recv, | 572 | .recv = tpm_tis_i2c_recv, |
571 | .send = tpm_tis_i2c_send, | 573 | .send = tpm_tis_i2c_send, |
@@ -585,14 +587,11 @@ static int tpm_tis_i2c_init(struct device *dev) | |||
585 | if (IS_ERR(chip)) | 587 | if (IS_ERR(chip)) |
586 | return PTR_ERR(chip); | 588 | return PTR_ERR(chip); |
587 | 589 | ||
588 | /* Disable interrupts */ | ||
589 | chip->vendor.irq = 0; | ||
590 | |||
591 | /* Default timeouts */ | 590 | /* Default timeouts */ |
592 | chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | 591 | chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); |
593 | chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); | 592 | chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); |
594 | chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | 593 | chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); |
595 | chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); | 594 | chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); |
596 | 595 | ||
597 | if (request_locality(chip, 0) != 0) { | 596 | if (request_locality(chip, 0) != 0) { |
598 | dev_err(dev, "could not request locality\n"); | 597 | dev_err(dev, "could not request locality\n"); |
@@ -619,15 +618,11 @@ static int tpm_tis_i2c_init(struct device *dev) | |||
619 | 618 | ||
620 | dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16); | 619 | dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16); |
621 | 620 | ||
622 | INIT_LIST_HEAD(&chip->vendor.list); | ||
623 | tpm_dev.chip = chip; | 621 | tpm_dev.chip = chip; |
624 | 622 | ||
625 | tpm_get_timeouts(chip); | ||
626 | tpm_do_selftest(chip); | ||
627 | |||
628 | return tpm_chip_register(chip); | 623 | return tpm_chip_register(chip); |
629 | out_release: | 624 | out_release: |
630 | release_locality(chip, chip->vendor.locality, 1); | 625 | release_locality(chip, tpm_dev.locality, 1); |
631 | tpm_dev.client = NULL; | 626 | tpm_dev.client = NULL; |
632 | out_err: | 627 | out_err: |
633 | return rc; | 628 | return rc; |
@@ -699,7 +694,7 @@ static int tpm_tis_i2c_remove(struct i2c_client *client) | |||
699 | struct tpm_chip *chip = tpm_dev.chip; | 694 | struct tpm_chip *chip = tpm_dev.chip; |
700 | 695 | ||
701 | tpm_chip_unregister(chip); | 696 | tpm_chip_unregister(chip); |
702 | release_locality(chip, chip->vendor.locality, 1); | 697 | release_locality(chip, tpm_dev.locality, 1); |
703 | tpm_dev.client = NULL; | 698 | tpm_dev.client = NULL; |
704 | 699 | ||
705 | return 0; | 700 | return 0; |
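The Infineon driver takes a slightly different route to the same chip->vendor removal: it already keeps one file-scope struct tpm_inf_dev instance, so the active locality simply moves there, while the timeouts become the core's chip->timeout_* fields. Below is a reduced sketch of that state, keeping only a subset of the fields from the hunk; my_status is a hypothetical reader, and iic_tpm_read()/TPM_STS() are the driver's own accessors shown above.

/* Reduced sketch: single-instance driver state after the hunks above. */
struct tpm_inf_dev {
	struct i2c_client *client;
	int locality;			/* was chip->vendor.locality */
	struct tpm_chip *chip;
};

static struct tpm_inf_dev tpm_dev;	/* single supported device, hence file scope */

static u8 my_status(struct tpm_chip *chip)
{
	u8 buf;

	if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0)
		return 0;
	return buf;
}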
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index 847f1597fe9b..e3a9155ee671 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501, | 2 | * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX, |
3 | * based on the TCG TPM Interface Spec version 1.2. | 3 | * based on the TCG TPM Interface Spec version 1.2. |
4 | * Specifications at www.trustedcomputinggroup.org | 4 | * Specifications at www.trustedcomputinggroup.org |
5 | * | 5 | * |
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
32 | #include <linux/wait.h> | 32 | #include <linux/wait.h> |
33 | #include <linux/i2c.h> | 33 | #include <linux/i2c.h> |
34 | #include <linux/of_device.h> | ||
34 | #include "tpm.h" | 35 | #include "tpm.h" |
35 | 36 | ||
36 | /* I2C interface offsets */ | 37 | /* I2C interface offsets */ |
@@ -52,10 +53,13 @@ | |||
52 | #define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ | 53 | #define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ |
53 | #define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ | 54 | #define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ |
54 | 55 | ||
55 | #define I2C_DRIVER_NAME "tpm_i2c_nuvoton" | 56 | #define OF_IS_TPM2 ((void *)1) |
57 | #define I2C_IS_TPM2 1 | ||
56 | 58 | ||
57 | struct priv_data { | 59 | struct priv_data { |
60 | int irq; | ||
58 | unsigned int intrs; | 61 | unsigned int intrs; |
62 | wait_queue_head_t read_queue; | ||
59 | }; | 63 | }; |
60 | 64 | ||
61 | static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, | 65 | static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, |
@@ -96,13 +100,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, | |||
96 | /* read TPM_STS register */ | 100 | /* read TPM_STS register */ |
97 | static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) | 101 | static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) |
98 | { | 102 | { |
99 | struct i2c_client *client = to_i2c_client(chip->pdev); | 103 | struct i2c_client *client = to_i2c_client(chip->dev.parent); |
100 | s32 status; | 104 | s32 status; |
101 | u8 data; | 105 | u8 data; |
102 | 106 | ||
103 | status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); | 107 | status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); |
104 | if (status <= 0) { | 108 | if (status <= 0) { |
105 | dev_err(chip->pdev, "%s() error return %d\n", __func__, | 109 | dev_err(&chip->dev, "%s() error return %d\n", __func__, |
106 | status); | 110 | status); |
107 | data = TPM_STS_ERR_VAL; | 111 | data = TPM_STS_ERR_VAL; |
108 | } | 112 | } |
@@ -127,13 +131,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) | |||
127 | /* write commandReady to TPM_STS register */ | 131 | /* write commandReady to TPM_STS register */ |
128 | static void i2c_nuvoton_ready(struct tpm_chip *chip) | 132 | static void i2c_nuvoton_ready(struct tpm_chip *chip) |
129 | { | 133 | { |
130 | struct i2c_client *client = to_i2c_client(chip->pdev); | 134 | struct i2c_client *client = to_i2c_client(chip->dev.parent); |
131 | s32 status; | 135 | s32 status; |
132 | 136 | ||
133 | /* this causes the current command to be aborted */ | 137 | /* this causes the current command to be aborted */ |
134 | status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); | 138 | status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); |
135 | if (status < 0) | 139 | if (status < 0) |
136 | dev_err(chip->pdev, | 140 | dev_err(&chip->dev, |
137 | "%s() fail to write TPM_STS.commandReady\n", __func__); | 141 | "%s() fail to write TPM_STS.commandReady\n", __func__); |
138 | } | 142 | } |
139 | 143 | ||
@@ -142,7 +146,7 @@ static void i2c_nuvoton_ready(struct tpm_chip *chip) | |||
142 | static int i2c_nuvoton_get_burstcount(struct i2c_client *client, | 146 | static int i2c_nuvoton_get_burstcount(struct i2c_client *client, |
143 | struct tpm_chip *chip) | 147 | struct tpm_chip *chip) |
144 | { | 148 | { |
145 | unsigned long stop = jiffies + chip->vendor.timeout_d; | 149 | unsigned long stop = jiffies + chip->timeout_d; |
146 | s32 status; | 150 | s32 status; |
147 | int burst_count = -1; | 151 | int burst_count = -1; |
148 | u8 data; | 152 | u8 data; |
@@ -163,7 +167,7 @@ static int i2c_nuvoton_get_burstcount(struct i2c_client *client, | |||
163 | } | 167 | } |
164 | 168 | ||
165 | /* | 169 | /* |
166 | * WPCT301/NPCT501 SINT# supports only dataAvail | 170 | * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail |
167 | * any call to this function which is not waiting for dataAvail will | 171 | * any call to this function which is not waiting for dataAvail will |
168 | * set queue to NULL to avoid waiting for interrupt | 172 | * set queue to NULL to avoid waiting for interrupt |
169 | */ | 173 | */ |
@@ -176,12 +180,12 @@ static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) | |||
176 | static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, | 180 | static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, |
177 | u32 timeout, wait_queue_head_t *queue) | 181 | u32 timeout, wait_queue_head_t *queue) |
178 | { | 182 | { |
179 | if (chip->vendor.irq && queue) { | 183 | if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) { |
180 | s32 rc; | 184 | s32 rc; |
181 | struct priv_data *priv = chip->vendor.priv; | 185 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
182 | unsigned int cur_intrs = priv->intrs; | 186 | unsigned int cur_intrs = priv->intrs; |
183 | 187 | ||
184 | enable_irq(chip->vendor.irq); | 188 | enable_irq(priv->irq); |
185 | rc = wait_event_interruptible_timeout(*queue, | 189 | rc = wait_event_interruptible_timeout(*queue, |
186 | cur_intrs != priv->intrs, | 190 | cur_intrs != priv->intrs, |
187 | timeout); | 191 | timeout); |
@@ -212,7 +216,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, | |||
212 | return 0; | 216 | return 0; |
213 | } while (time_before(jiffies, stop)); | 217 | } while (time_before(jiffies, stop)); |
214 | } | 218 | } |
215 | dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask, | 219 | dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, |
216 | value); | 220 | value); |
217 | return -ETIMEDOUT; | 221 | return -ETIMEDOUT; |
218 | } | 222 | } |
@@ -231,16 +235,17 @@ static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, | |||
231 | static int i2c_nuvoton_recv_data(struct i2c_client *client, | 235 | static int i2c_nuvoton_recv_data(struct i2c_client *client, |
232 | struct tpm_chip *chip, u8 *buf, size_t count) | 236 | struct tpm_chip *chip, u8 *buf, size_t count) |
233 | { | 237 | { |
238 | struct priv_data *priv = dev_get_drvdata(&chip->dev); | ||
234 | s32 rc; | 239 | s32 rc; |
235 | int burst_count, bytes2read, size = 0; | 240 | int burst_count, bytes2read, size = 0; |
236 | 241 | ||
237 | while (size < count && | 242 | while (size < count && |
238 | i2c_nuvoton_wait_for_data_avail(chip, | 243 | i2c_nuvoton_wait_for_data_avail(chip, |
239 | chip->vendor.timeout_c, | 244 | chip->timeout_c, |
240 | &chip->vendor.read_queue) == 0) { | 245 | &priv->read_queue) == 0) { |
241 | burst_count = i2c_nuvoton_get_burstcount(client, chip); | 246 | burst_count = i2c_nuvoton_get_burstcount(client, chip); |
242 | if (burst_count < 0) { | 247 | if (burst_count < 0) { |
243 | dev_err(chip->pdev, | 248 | dev_err(&chip->dev, |
244 | "%s() fail to read burstCount=%d\n", __func__, | 249 | "%s() fail to read burstCount=%d\n", __func__, |
245 | burst_count); | 250 | burst_count); |
246 | return -EIO; | 251 | return -EIO; |
@@ -249,12 +254,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, | |||
249 | rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, | 254 | rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, |
250 | bytes2read, &buf[size]); | 255 | bytes2read, &buf[size]); |
251 | if (rc < 0) { | 256 | if (rc < 0) { |
252 | dev_err(chip->pdev, | 257 | dev_err(&chip->dev, |
253 | "%s() fail on i2c_nuvoton_read_buf()=%d\n", | 258 | "%s() fail on i2c_nuvoton_read_buf()=%d\n", |
254 | __func__, rc); | 259 | __func__, rc); |
255 | return -EIO; | 260 | return -EIO; |
256 | } | 261 | } |
257 | dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read); | 262 | dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read); |
258 | size += bytes2read; | 263 | size += bytes2read; |
259 | } | 264 | } |
260 | 265 | ||
@@ -264,7 +269,8 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, | |||
264 | /* Read TPM command results */ | 269 | /* Read TPM command results */ |
265 | static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 270 | static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
266 | { | 271 | { |
267 | struct device *dev = chip->pdev; | 272 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
273 | struct device *dev = chip->dev.parent; | ||
268 | struct i2c_client *client = to_i2c_client(dev); | 274 | struct i2c_client *client = to_i2c_client(dev); |
269 | s32 rc; | 275 | s32 rc; |
270 | int expected, status, burst_count, retries, size = 0; | 276 | int expected, status, burst_count, retries, size = 0; |
@@ -285,7 +291,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
285 | * tag, paramsize, and result | 291 | * tag, paramsize, and result |
286 | */ | 292 | */ |
287 | status = i2c_nuvoton_wait_for_data_avail( | 293 | status = i2c_nuvoton_wait_for_data_avail( |
288 | chip, chip->vendor.timeout_c, &chip->vendor.read_queue); | 294 | chip, chip->timeout_c, &priv->read_queue); |
289 | if (status != 0) { | 295 | if (status != 0) { |
290 | dev_err(dev, "%s() timeout on dataAvail\n", __func__); | 296 | dev_err(dev, "%s() timeout on dataAvail\n", __func__); |
291 | size = -ETIMEDOUT; | 297 | size = -ETIMEDOUT; |
@@ -325,7 +331,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
325 | } | 331 | } |
326 | if (i2c_nuvoton_wait_for_stat( | 332 | if (i2c_nuvoton_wait_for_stat( |
327 | chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, | 333 | chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, |
328 | TPM_STS_VALID, chip->vendor.timeout_c, | 334 | TPM_STS_VALID, chip->timeout_c, |
329 | NULL)) { | 335 | NULL)) { |
330 | dev_err(dev, "%s() error left over data\n", __func__); | 336 | dev_err(dev, "%s() error left over data\n", __func__); |
331 | size = -ETIMEDOUT; | 337 | size = -ETIMEDOUT; |
@@ -334,7 +340,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
334 | break; | 340 | break; |
335 | } | 341 | } |
336 | i2c_nuvoton_ready(chip); | 342 | i2c_nuvoton_ready(chip); |
337 | dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size); | 343 | dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size); |
338 | return size; | 344 | return size; |
339 | } | 345 | } |
340 | 346 | ||
@@ -347,7 +353,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
347 | */ | 353 | */ |
348 | static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) | 354 | static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) |
349 | { | 355 | { |
350 | struct device *dev = chip->pdev; | 356 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
357 | struct device *dev = chip->dev.parent; | ||
351 | struct i2c_client *client = to_i2c_client(dev); | 358 | struct i2c_client *client = to_i2c_client(dev); |
352 | u32 ordinal; | 359 | u32 ordinal; |
353 | size_t count = 0; | 360 | size_t count = 0; |
@@ -357,7 +364,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
357 | i2c_nuvoton_ready(chip); | 364 | i2c_nuvoton_ready(chip); |
358 | if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, | 365 | if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, |
359 | TPM_STS_COMMAND_READY, | 366 | TPM_STS_COMMAND_READY, |
360 | chip->vendor.timeout_b, NULL)) { | 367 | chip->timeout_b, NULL)) { |
361 | dev_err(dev, "%s() timeout on commandReady\n", | 368 | dev_err(dev, "%s() timeout on commandReady\n", |
362 | __func__); | 369 | __func__); |
363 | rc = -EIO; | 370 | rc = -EIO; |
@@ -389,7 +396,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
389 | TPM_STS_EXPECT, | 396 | TPM_STS_EXPECT, |
390 | TPM_STS_VALID | | 397 | TPM_STS_VALID | |
391 | TPM_STS_EXPECT, | 398 | TPM_STS_EXPECT, |
392 | chip->vendor.timeout_c, | 399 | chip->timeout_c, |
393 | NULL); | 400 | NULL); |
394 | if (rc < 0) { | 401 | if (rc < 0) { |
395 | dev_err(dev, "%s() timeout on Expect\n", | 402 | dev_err(dev, "%s() timeout on Expect\n", |
@@ -414,7 +421,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
414 | rc = i2c_nuvoton_wait_for_stat(chip, | 421 | rc = i2c_nuvoton_wait_for_stat(chip, |
415 | TPM_STS_VALID | TPM_STS_EXPECT, | 422 | TPM_STS_VALID | TPM_STS_EXPECT, |
416 | TPM_STS_VALID, | 423 | TPM_STS_VALID, |
417 | chip->vendor.timeout_c, NULL); | 424 | chip->timeout_c, NULL); |
418 | if (rc) { | 425 | if (rc) { |
419 | dev_err(dev, "%s() timeout on Expect to clear\n", | 426 | dev_err(dev, "%s() timeout on Expect to clear\n", |
420 | __func__); | 427 | __func__); |
@@ -439,7 +446,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
439 | rc = i2c_nuvoton_wait_for_data_avail(chip, | 446 | rc = i2c_nuvoton_wait_for_data_avail(chip, |
440 | tpm_calc_ordinal_duration(chip, | 447 | tpm_calc_ordinal_duration(chip, |
441 | ordinal), | 448 | ordinal), |
442 | &chip->vendor.read_queue); | 449 | &priv->read_queue); |
443 | if (rc) { | 450 | if (rc) { |
444 | dev_err(dev, "%s() timeout command duration\n", __func__); | 451 | dev_err(dev, "%s() timeout command duration\n", __func__); |
445 | i2c_nuvoton_ready(chip); | 452 | i2c_nuvoton_ready(chip); |
@@ -456,6 +463,7 @@ static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) | |||
456 | } | 463 | } |
457 | 464 | ||
458 | static const struct tpm_class_ops tpm_i2c = { | 465 | static const struct tpm_class_ops tpm_i2c = { |
466 | .flags = TPM_OPS_AUTO_STARTUP, | ||
459 | .status = i2c_nuvoton_read_status, | 467 | .status = i2c_nuvoton_read_status, |
460 | .recv = i2c_nuvoton_recv, | 468 | .recv = i2c_nuvoton_recv, |
461 | .send = i2c_nuvoton_send, | 469 | .send = i2c_nuvoton_send, |
@@ -473,11 +481,11 @@ static const struct tpm_class_ops tpm_i2c = { | |||
473 | static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) | 481 | static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) |
474 | { | 482 | { |
475 | struct tpm_chip *chip = dev_id; | 483 | struct tpm_chip *chip = dev_id; |
476 | struct priv_data *priv = chip->vendor.priv; | 484 | struct priv_data *priv = dev_get_drvdata(&chip->dev); |
477 | 485 | ||
478 | priv->intrs++; | 486 | priv->intrs++; |
479 | wake_up(&chip->vendor.read_queue); | 487 | wake_up(&priv->read_queue); |
480 | disable_irq_nosync(chip->vendor.irq); | 488 | disable_irq_nosync(priv->irq); |
481 | return IRQ_HANDLED; | 489 | return IRQ_HANDLED; |
482 | } | 490 | } |
483 | 491 | ||
@@ -521,6 +529,7 @@ static int i2c_nuvoton_probe(struct i2c_client *client, | |||
521 | int rc; | 529 | int rc; |
522 | struct tpm_chip *chip; | 530 | struct tpm_chip *chip; |
523 | struct device *dev = &client->dev; | 531 | struct device *dev = &client->dev; |
532 | struct priv_data *priv; | ||
524 | u32 vid = 0; | 533 | u32 vid = 0; |
525 | 534 | ||
526 | rc = get_vid(client, &vid); | 535 | rc = get_vid(client, &vid); |
@@ -534,46 +543,56 @@ static int i2c_nuvoton_probe(struct i2c_client *client, | |||
534 | if (IS_ERR(chip)) | 543 | if (IS_ERR(chip)) |
535 | return PTR_ERR(chip); | 544 | return PTR_ERR(chip); |
536 | 545 | ||
537 | chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), | 546 | priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); |
538 | GFP_KERNEL); | 547 | if (!priv) |
539 | if (!chip->vendor.priv) | ||
540 | return -ENOMEM; | 548 | return -ENOMEM; |
541 | 549 | ||
542 | init_waitqueue_head(&chip->vendor.read_queue); | 550 | if (dev->of_node) { |
543 | init_waitqueue_head(&chip->vendor.int_queue); | 551 | const struct of_device_id *of_id; |
552 | |||
553 | of_id = of_match_device(dev->driver->of_match_table, dev); | ||
554 | if (of_id && of_id->data == OF_IS_TPM2) | ||
555 | chip->flags |= TPM_CHIP_FLAG_TPM2; | ||
556 | } else | ||
557 | if (id->driver_data == I2C_IS_TPM2) | ||
558 | chip->flags |= TPM_CHIP_FLAG_TPM2; | ||
559 | |||
560 | init_waitqueue_head(&priv->read_queue); | ||
544 | 561 | ||
545 | /* Default timeouts */ | 562 | /* Default timeouts */ |
546 | chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); | 563 | chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); |
547 | chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); | 564 | chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); |
548 | chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); | 565 | chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); |
549 | chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); | 566 | chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); |
567 | |||
568 | dev_set_drvdata(&chip->dev, priv); | ||
550 | 569 | ||
551 | /* | 570 | /* |
552 | * I2C intfcaps (interrupt capabilities) in the chip are hard coded to: | 571 | * I2C intfcaps (interrupt capabilities) in the chip are hard coded to: |
553 | * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT | 572 | * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT |
554 | * The IRQ should be set in the i2c_board_info (which is done | 573 | * The IRQ should be set in the i2c_board_info (which is done |
555 | * automatically in of_i2c_register_devices, for device tree users */ | 574 | * automatically in of_i2c_register_devices, for device tree users */ |
556 | chip->vendor.irq = client->irq; | 575 | priv->irq = client->irq; |
557 | 576 | if (client->irq) { | |
558 | if (chip->vendor.irq) { | 577 | dev_dbg(dev, "%s() priv->irq\n", __func__); |
559 | dev_dbg(dev, "%s() chip-vendor.irq\n", __func__); | 578 | rc = devm_request_irq(dev, client->irq, |
560 | rc = devm_request_irq(dev, chip->vendor.irq, | ||
561 | i2c_nuvoton_int_handler, | 579 | i2c_nuvoton_int_handler, |
562 | IRQF_TRIGGER_LOW, | 580 | IRQF_TRIGGER_LOW, |
563 | chip->devname, | 581 | dev_name(&chip->dev), |
564 | chip); | 582 | chip); |
565 | if (rc) { | 583 | if (rc) { |
566 | dev_err(dev, "%s() Unable to request irq: %d for use\n", | 584 | dev_err(dev, "%s() Unable to request irq: %d for use\n", |
567 | __func__, chip->vendor.irq); | 585 | __func__, priv->irq); |
568 | chip->vendor.irq = 0; | 586 | priv->irq = 0; |
569 | } else { | 587 | } else { |
588 | chip->flags |= TPM_CHIP_FLAG_IRQ; | ||
570 | /* Clear any pending interrupt */ | 589 | /* Clear any pending interrupt */ |
571 | i2c_nuvoton_ready(chip); | 590 | i2c_nuvoton_ready(chip); |
572 | /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ | 591 | /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ |
573 | rc = i2c_nuvoton_wait_for_stat(chip, | 592 | rc = i2c_nuvoton_wait_for_stat(chip, |
574 | TPM_STS_COMMAND_READY, | 593 | TPM_STS_COMMAND_READY, |
575 | TPM_STS_COMMAND_READY, | 594 | TPM_STS_COMMAND_READY, |
576 | chip->vendor.timeout_b, | 595 | chip->timeout_b, |
577 | NULL); | 596 | NULL); |
578 | if (rc == 0) { | 597 | if (rc == 0) { |
579 | /* | 598 | /* |
@@ -601,25 +620,20 @@ static int i2c_nuvoton_probe(struct i2c_client *client, | |||
601 | } | 620 | } |
602 | } | 621 | } |
603 | 622 | ||
604 | if (tpm_get_timeouts(chip)) | ||
605 | return -ENODEV; | ||
606 | |||
607 | if (tpm_do_selftest(chip)) | ||
608 | return -ENODEV; | ||
609 | |||
610 | return tpm_chip_register(chip); | 623 | return tpm_chip_register(chip); |
611 | } | 624 | } |
612 | 625 | ||
613 | static int i2c_nuvoton_remove(struct i2c_client *client) | 626 | static int i2c_nuvoton_remove(struct i2c_client *client) |
614 | { | 627 | { |
615 | struct device *dev = &(client->dev); | 628 | struct tpm_chip *chip = i2c_get_clientdata(client); |
616 | struct tpm_chip *chip = dev_get_drvdata(dev); | 629 | |
617 | tpm_chip_unregister(chip); | 630 | tpm_chip_unregister(chip); |
618 | return 0; | 631 | return 0; |
619 | } | 632 | } |
620 | 633 | ||
621 | static const struct i2c_device_id i2c_nuvoton_id[] = { | 634 | static const struct i2c_device_id i2c_nuvoton_id[] = { |
622 | {I2C_DRIVER_NAME, 0}, | 635 | {"tpm_i2c_nuvoton"}, |
636 | {"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2}, | ||
623 | {} | 637 | {} |
624 | }; | 638 | }; |
625 | MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); | 639 | MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); |
@@ -628,6 +642,7 @@ MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); | |||
628 | static const struct of_device_id i2c_nuvoton_of_match[] = { | 642 | static const struct of_device_id i2c_nuvoton_of_match[] = { |
629 | {.compatible = "nuvoton,npct501"}, | 643 | {.compatible = "nuvoton,npct501"}, |
630 | {.compatible = "winbond,wpct301"}, | 644 | {.compatible = "winbond,wpct301"}, |
645 | {.compatible = "nuvoton,npct601", .data = OF_IS_TPM2}, | ||
631 | {}, | 646 | {}, |
632 | }; | 647 | }; |
633 | MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); | 648 | MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); |
@@ -640,7 +655,7 @@ static struct i2c_driver i2c_nuvoton_driver = { | |||
640 | .probe = i2c_nuvoton_probe, | 655 | .probe = i2c_nuvoton_probe, |
641 | .remove = i2c_nuvoton_remove, | 656 | .remove = i2c_nuvoton_remove, |
642 | .driver = { | 657 | .driver = { |
643 | .name = I2C_DRIVER_NAME, | 658 | .name = "tpm_i2c_nuvoton", |
644 | .pm = &i2c_nuvoton_pm_ops, | 659 | .pm = &i2c_nuvoton_pm_ops, |
645 | .of_match_table = of_match_ptr(i2c_nuvoton_of_match), | 660 | .of_match_table = of_match_ptr(i2c_nuvoton_of_match), |
646 | }, | 661 | }, |
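Besides the chip->vendor cleanup, the Nuvoton hunks add TPM 2.0 support for the NPCT6xx parts: whether the device is a TPM 2.0 is decided from the match tables — OF match data on device-tree systems, the I2C device-id driver_data otherwise — and recorded as TPM_CHIP_FLAG_TPM2 before registration. The sketch below isolates that decision; my_of_match and my_flag_tpm2 are illustrative names, while the table entries and flag values mirror the hunks (the driver itself reaches the table via dev->driver->of_match_table).

#include <linux/i2c.h>
#include <linux/of_device.h>
#include "tpm.h"	/* struct tpm_chip, TPM_CHIP_FLAG_TPM2 */

#define OF_IS_TPM2	((void *)1)
#define I2C_IS_TPM2	1

static const struct of_device_id my_of_match[] = {
	{ .compatible = "nuvoton,npct501" },
	{ .compatible = "winbond,wpct301" },
	{ .compatible = "nuvoton,npct601", .data = OF_IS_TPM2 },
	{ }
};

static void my_flag_tpm2(struct device *dev, struct tpm_chip *chip,
			 const struct i2c_device_id *id)
{
	if (dev->of_node) {
		const struct of_device_id *of_id =
			of_match_device(my_of_match, dev);

		if (of_id && of_id->data == OF_IS_TPM2)
			chip->flags |= TPM_CHIP_FLAG_TPM2;
	} else if (id->driver_data == I2C_IS_TPM2) {
		chip->flags |= TPM_CHIP_FLAG_TPM2;
	}
}

The irq number and read_queue moving into struct priv_data, together with TPM_CHIP_FLAG_IRQ, follow the same drvdata pattern as the Atmel sketch earlier.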
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b0a9a9e34241..946025a7413b 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c | |||
@@ -54,21 +54,6 @@ static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) | |||
54 | } | 54 | } |
55 | 55 | ||
56 | /** | 56 | /** |
57 | * ibmvtpm_get_data - Retrieve ibm vtpm data | ||
58 | * @dev: device struct | ||
59 | * | ||
60 | * Return value: | ||
61 | * vtpm device struct | ||
62 | */ | ||
63 | static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev) | ||
64 | { | ||
65 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
66 | if (chip) | ||
67 | return (struct ibmvtpm_dev *)TPM_VPRIV(chip); | ||
68 | return NULL; | ||
69 | } | ||
70 | |||
71 | /** | ||
72 | * tpm_ibmvtpm_recv - Receive data after send | 57 | * tpm_ibmvtpm_recv - Receive data after send |
73 | * @chip: tpm chip struct | 58 | * @chip: tpm chip struct |
74 | * @buf: buffer to read | 59 | * @buf: buffer to read |
@@ -79,12 +64,10 @@ static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev) | |||
79 | */ | 64 | */ |
80 | static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 65 | static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
81 | { | 66 | { |
82 | struct ibmvtpm_dev *ibmvtpm; | 67 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); |
83 | u16 len; | 68 | u16 len; |
84 | int sig; | 69 | int sig; |
85 | 70 | ||
86 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); | ||
87 | |||
88 | if (!ibmvtpm->rtce_buf) { | 71 | if (!ibmvtpm->rtce_buf) { |
89 | dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); | 72 | dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); |
90 | return 0; | 73 | return 0; |
@@ -122,13 +105,11 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
122 | */ | 105 | */ |
123 | static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | 106 | static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) |
124 | { | 107 | { |
125 | struct ibmvtpm_dev *ibmvtpm; | 108 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); |
126 | struct ibmvtpm_crq crq; | 109 | struct ibmvtpm_crq crq; |
127 | __be64 *word = (__be64 *)&crq; | 110 | __be64 *word = (__be64 *)&crq; |
128 | int rc, sig; | 111 | int rc, sig; |
129 | 112 | ||
130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); | ||
131 | |||
132 | if (!ibmvtpm->rtce_buf) { | 113 | if (!ibmvtpm->rtce_buf) { |
133 | dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); | 114 | dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); |
134 | return 0; | 115 | return 0; |
@@ -289,8 +270,8 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) | |||
289 | */ | 270 | */ |
290 | static int tpm_ibmvtpm_remove(struct vio_dev *vdev) | 271 | static int tpm_ibmvtpm_remove(struct vio_dev *vdev) |
291 | { | 272 | { |
292 | struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); | 273 | struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); |
293 | struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev); | 274 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); |
294 | int rc = 0; | 275 | int rc = 0; |
295 | 276 | ||
296 | tpm_chip_unregister(chip); | 277 | tpm_chip_unregister(chip); |
@@ -327,7 +308,8 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) | |||
327 | */ | 308 | */ |
328 | static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) | 309 | static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) |
329 | { | 310 | { |
330 | struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); | 311 | struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); |
312 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); | ||
331 | 313 | ||
332 | /* ibmvtpm initializes at probe time, so the data we are | 314 | /* ibmvtpm initializes at probe time, so the data we are |
333 | * asking for may not be set yet. Estimate that 4K required | 315 | * asking for may not be set yet. Estimate that 4K required |
@@ -348,7 +330,8 @@ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) | |||
348 | */ | 330 | */ |
349 | static int tpm_ibmvtpm_suspend(struct device *dev) | 331 | static int tpm_ibmvtpm_suspend(struct device *dev) |
350 | { | 332 | { |
351 | struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev); | 333 | struct tpm_chip *chip = dev_get_drvdata(dev); |
334 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); | ||
352 | struct ibmvtpm_crq crq; | 335 | struct ibmvtpm_crq crq; |
353 | u64 *buf = (u64 *) &crq; | 336 | u64 *buf = (u64 *) &crq; |
354 | int rc = 0; | 337 | int rc = 0; |
@@ -400,7 +383,8 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) | |||
400 | */ | 383 | */ |
401 | static int tpm_ibmvtpm_resume(struct device *dev) | 384 | static int tpm_ibmvtpm_resume(struct device *dev) |
402 | { | 385 | { |
403 | struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev); | 386 | struct tpm_chip *chip = dev_get_drvdata(dev); |
387 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); | ||
404 | int rc = 0; | 388 | int rc = 0; |
405 | 389 | ||
406 | do { | 390 | do { |
@@ -643,7 +627,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, | |||
643 | 627 | ||
644 | crq_q->index = 0; | 628 | crq_q->index = 0; |
645 | 629 | ||
646 | TPM_VPRIV(chip) = (void *)ibmvtpm; | 630 | dev_set_drvdata(&chip->dev, ibmvtpm); |
647 | 631 | ||
648 | spin_lock_init(&ibmvtpm->rtce_lock); | 632 | spin_lock_init(&ibmvtpm->rtce_lock); |
649 | 633 | ||
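The ibmvtpm hunks retire TPM_VPRIV() and the ibmvtpm_get_data() helper in favour of two plain drvdata hops: the vio device's drvdata holds the struct tpm_chip (which the remove/suspend/resume hunks already rely on), and the chip device's drvdata holds struct ibmvtpm_dev, set by the probe hunk above. A minimal sketch of that lookup chain follows; my_dev_from_bus is a hypothetical helper, and the snippet assumes the driver's own tpm_ibmvtpm.h context.

/* bus device -> tpm_chip -> driver private data */
static struct ibmvtpm_dev *my_dev_from_bus(struct device *busdev)
{
	struct tpm_chip *chip = dev_get_drvdata(busdev);

	return dev_get_drvdata(&chip->dev);
}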
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 6c488e635fdd..e3cf9f3545c5 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c | |||
@@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) | |||
195 | } | 195 | } |
196 | if (i == TPM_MAX_TRIES) { /* timeout occurs */ | 196 | if (i == TPM_MAX_TRIES) { /* timeout occurs */ |
197 | if (wait_for_bit == STAT_XFE) | 197 | if (wait_for_bit == STAT_XFE) |
198 | dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n"); | 198 | dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n"); |
199 | if (wait_for_bit == STAT_RDA) | 199 | if (wait_for_bit == STAT_RDA) |
200 | dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n"); | 200 | dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n"); |
201 | return -EIO; | 201 | return -EIO; |
202 | } | 202 | } |
203 | return 0; | 203 | return 0; |
@@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) | |||
220 | static void tpm_wtx(struct tpm_chip *chip) | 220 | static void tpm_wtx(struct tpm_chip *chip) |
221 | { | 221 | { |
222 | number_of_wtx++; | 222 | number_of_wtx++; |
223 | dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n", | 223 | dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n", |
224 | number_of_wtx, TPM_MAX_WTX_PACKAGES); | 224 | number_of_wtx, TPM_MAX_WTX_PACKAGES); |
225 | wait_and_send(chip, TPM_VL_VER); | 225 | wait_and_send(chip, TPM_VL_VER); |
226 | wait_and_send(chip, TPM_CTRL_WTX); | 226 | wait_and_send(chip, TPM_CTRL_WTX); |
@@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip) | |||
231 | 231 | ||
232 | static void tpm_wtx_abort(struct tpm_chip *chip) | 232 | static void tpm_wtx_abort(struct tpm_chip *chip) |
233 | { | 233 | { |
234 | dev_info(chip->pdev, "Aborting WTX\n"); | 234 | dev_info(&chip->dev, "Aborting WTX\n"); |
235 | wait_and_send(chip, TPM_VL_VER); | 235 | wait_and_send(chip, TPM_VL_VER); |
236 | wait_and_send(chip, TPM_CTRL_WTX_ABORT); | 236 | wait_and_send(chip, TPM_CTRL_WTX_ABORT); |
237 | wait_and_send(chip, 0x00); | 237 | wait_and_send(chip, 0x00); |
@@ -257,7 +257,7 @@ recv_begin: | |||
257 | } | 257 | } |
258 | 258 | ||
259 | if (buf[0] != TPM_VL_VER) { | 259 | if (buf[0] != TPM_VL_VER) { |
260 | dev_err(chip->pdev, | 260 | dev_err(&chip->dev, |
261 | "Wrong transport protocol implementation!\n"); | 261 | "Wrong transport protocol implementation!\n"); |
262 | return -EIO; | 262 | return -EIO; |
263 | } | 263 | } |
@@ -272,7 +272,7 @@ recv_begin: | |||
272 | } | 272 | } |
273 | 273 | ||
274 | if ((size == 0x6D00) && (buf[1] == 0x80)) { | 274 | if ((size == 0x6D00) && (buf[1] == 0x80)) { |
275 | dev_err(chip->pdev, "Error handling on vendor layer!\n"); | 275 | dev_err(&chip->dev, "Error handling on vendor layer!\n"); |
276 | return -EIO; | 276 | return -EIO; |
277 | } | 277 | } |
278 | 278 | ||
@@ -284,7 +284,7 @@ recv_begin: | |||
284 | } | 284 | } |
285 | 285 | ||
286 | if (buf[1] == TPM_CTRL_WTX) { | 286 | if (buf[1] == TPM_CTRL_WTX) { |
287 | dev_info(chip->pdev, "WTX-package received\n"); | 287 | dev_info(&chip->dev, "WTX-package received\n"); |
288 | if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { | 288 | if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { |
289 | tpm_wtx(chip); | 289 | tpm_wtx(chip); |
290 | goto recv_begin; | 290 | goto recv_begin; |
@@ -295,14 +295,14 @@ recv_begin: | |||
295 | } | 295 | } |
296 | 296 | ||
297 | if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { | 297 | if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { |
298 | dev_info(chip->pdev, "WTX-abort acknowledged\n"); | 298 | dev_info(&chip->dev, "WTX-abort acknowledged\n"); |
299 | return size; | 299 | return size; |
300 | } | 300 | } |
301 | 301 | ||
302 | if (buf[1] == TPM_CTRL_ERROR) { | 302 | if (buf[1] == TPM_CTRL_ERROR) { |
303 | dev_err(chip->pdev, "ERROR-package received:\n"); | 303 | dev_err(&chip->dev, "ERROR-package received:\n"); |
304 | if (buf[4] == TPM_INF_NAK) | 304 | if (buf[4] == TPM_INF_NAK) |
305 | dev_err(chip->pdev, | 305 | dev_err(&chip->dev, |
306 | "-> Negative acknowledgement" | 306 | "-> Negative acknowledgement" |
307 | " - retransmit command!\n"); | 307 | " - retransmit command!\n"); |
308 | return -EIO; | 308 | return -EIO; |
@@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) | |||
321 | 321 | ||
322 | ret = empty_fifo(chip, 1); | 322 | ret = empty_fifo(chip, 1); |
323 | if (ret) { | 323 | if (ret) { |
324 | dev_err(chip->pdev, "Timeout while clearing FIFO\n"); | 324 | dev_err(&chip->dev, "Timeout while clearing FIFO\n"); |
325 | return -EIO; | 325 | return -EIO; |
326 | } | 326 | } |
327 | 327 | ||
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 289389ecef84..9ff0e072c476 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c | |||
@@ -64,15 +64,21 @@ enum tpm_nsc_cmd_mode { | |||
64 | NSC_COMMAND_EOC = 0x03, | 64 | NSC_COMMAND_EOC = 0x03, |
65 | NSC_COMMAND_CANCEL = 0x22 | 65 | NSC_COMMAND_CANCEL = 0x22 |
66 | }; | 66 | }; |
67 | |||
68 | struct tpm_nsc_priv { | ||
69 | unsigned long base; | ||
70 | }; | ||
71 | |||
67 | /* | 72 | /* |
68 | * Wait for a certain status to appear | 73 | * Wait for a certain status to appear |
69 | */ | 74 | */ |
70 | static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) | 75 | static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) |
71 | { | 76 | { |
77 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); | ||
72 | unsigned long stop; | 78 | unsigned long stop; |
73 | 79 | ||
74 | /* status immediately available check */ | 80 | /* status immediately available check */ |
75 | *data = inb(chip->vendor.base + NSC_STATUS); | 81 | *data = inb(priv->base + NSC_STATUS); |
76 | if ((*data & mask) == val) | 82 | if ((*data & mask) == val) |
77 | return 0; | 83 | return 0; |
78 | 84 | ||
@@ -80,7 +86,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) | |||
80 | stop = jiffies + 10 * HZ; | 86 | stop = jiffies + 10 * HZ; |
81 | do { | 87 | do { |
82 | msleep(TPM_TIMEOUT); | 88 | msleep(TPM_TIMEOUT); |
83 | *data = inb(chip->vendor.base + 1); | 89 | *data = inb(priv->base + 1); |
84 | if ((*data & mask) == val) | 90 | if ((*data & mask) == val) |
85 | return 0; | 91 | return 0; |
86 | } | 92 | } |
@@ -91,13 +97,14 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) | |||
91 | 97 | ||
92 | static int nsc_wait_for_ready(struct tpm_chip *chip) | 98 | static int nsc_wait_for_ready(struct tpm_chip *chip) |
93 | { | 99 | { |
100 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); | ||
94 | int status; | 101 | int status; |
95 | unsigned long stop; | 102 | unsigned long stop; |
96 | 103 | ||
97 | /* status immediately available check */ | 104 | /* status immediately available check */ |
98 | status = inb(chip->vendor.base + NSC_STATUS); | 105 | status = inb(priv->base + NSC_STATUS); |
99 | if (status & NSC_STATUS_OBF) | 106 | if (status & NSC_STATUS_OBF) |
100 | status = inb(chip->vendor.base + NSC_DATA); | 107 | status = inb(priv->base + NSC_DATA); |
101 | if (status & NSC_STATUS_RDY) | 108 | if (status & NSC_STATUS_RDY) |
102 | return 0; | 109 | return 0; |
103 | 110 | ||
@@ -105,21 +112,22 @@ static int nsc_wait_for_ready(struct tpm_chip *chip) | |||
105 | stop = jiffies + 100; | 112 | stop = jiffies + 100; |
106 | do { | 113 | do { |
107 | msleep(TPM_TIMEOUT); | 114 | msleep(TPM_TIMEOUT); |
108 | status = inb(chip->vendor.base + NSC_STATUS); | 115 | status = inb(priv->base + NSC_STATUS); |
109 | if (status & NSC_STATUS_OBF) | 116 | if (status & NSC_STATUS_OBF) |
110 | status = inb(chip->vendor.base + NSC_DATA); | 117 | status = inb(priv->base + NSC_DATA); |
111 | if (status & NSC_STATUS_RDY) | 118 | if (status & NSC_STATUS_RDY) |
112 | return 0; | 119 | return 0; |
113 | } | 120 | } |
114 | while (time_before(jiffies, stop)); | 121 | while (time_before(jiffies, stop)); |
115 | 122 | ||
116 | dev_info(chip->pdev, "wait for ready failed\n"); | 123 | dev_info(&chip->dev, "wait for ready failed\n"); |
117 | return -EBUSY; | 124 | return -EBUSY; |
118 | } | 125 | } |
119 | 126 | ||
120 | 127 | ||
121 | static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) | 128 | static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) |
122 | { | 129 | { |
130 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); | ||
123 | u8 *buffer = buf; | 131 | u8 *buffer = buf; |
124 | u8 data, *p; | 132 | u8 data, *p; |
125 | u32 size; | 133 | u32 size; |
@@ -129,12 +137,13 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) | |||
129 | return -EIO; | 137 | return -EIO; |
130 | 138 | ||
131 | if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { | 139 | if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { |
132 | dev_err(chip->pdev, "F0 timeout\n"); | 140 | dev_err(&chip->dev, "F0 timeout\n"); |
133 | return -EIO; | 141 | return -EIO; |
134 | } | 142 | } |
135 | if ((data = | 143 | |
136 | inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) { | 144 | data = inb(priv->base + NSC_DATA); |
137 | dev_err(chip->pdev, "not in normal mode (0x%x)\n", | 145 | if (data != NSC_COMMAND_NORMAL) { |
146 | dev_err(&chip->dev, "not in normal mode (0x%x)\n", | ||
138 | data); | 147 | data); |
139 | return -EIO; | 148 | return -EIO; |
140 | } | 149 | } |
@@ -143,22 +152,24 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) | |||
143 | for (p = buffer; p < &buffer[count]; p++) { | 152 | for (p = buffer; p < &buffer[count]; p++) { |
144 | if (wait_for_stat | 153 | if (wait_for_stat |
145 | (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { | 154 | (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { |
146 | dev_err(chip->pdev, | 155 | dev_err(&chip->dev, |
147 | "OBF timeout (while reading data)\n"); | 156 | "OBF timeout (while reading data)\n"); |
148 | return -EIO; | 157 | return -EIO; |
149 | } | 158 | } |
150 | if (data & NSC_STATUS_F0) | 159 | if (data & NSC_STATUS_F0) |
151 | break; | 160 | break; |
152 | *p = inb(chip->vendor.base + NSC_DATA); | 161 | *p = inb(priv->base + NSC_DATA); |
153 | } | 162 | } |
154 | 163 | ||
155 | if ((data & NSC_STATUS_F0) == 0 && | 164 | if ((data & NSC_STATUS_F0) == 0 && |
156 | (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { | 165 | (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { |
157 | dev_err(chip->pdev, "F0 not set\n"); | 166 | dev_err(&chip->dev, "F0 not set\n"); |
158 | return -EIO; | 167 | return -EIO; |
159 | } | 168 | } |
160 | if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) { | 169 | |
161 | dev_err(chip->pdev, | 170 | data = inb(priv->base + NSC_DATA); |
171 | if (data != NSC_COMMAND_EOC) { | ||
172 | dev_err(&chip->dev, | ||
162 | "expected end of command(0x%x)\n", data); | 173 | "expected end of command(0x%x)\n", data); |
163 | return -EIO; | 174 | return -EIO; |
164 | } | 175 | } |
@@ -174,6 +185,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) | |||
174 | 185 | ||
175 | static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) | 186 | static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) |
176 | { | 187 | { |
188 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); | ||
177 | u8 data; | 189 | u8 data; |
178 | int i; | 190 | int i; |
179 | 191 | ||
@@ -183,48 +195,52 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) | |||
183 | * fix it. Not sure why this is needed, we followed the flow | 195 | * fix it. Not sure why this is needed, we followed the flow |
184 | * chart in the manual to the letter. | 196 | * chart in the manual to the letter. |
185 | */ | 197 | */ |
186 | outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); | 198 | outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND); |
187 | 199 | ||
188 | if (nsc_wait_for_ready(chip) != 0) | 200 | if (nsc_wait_for_ready(chip) != 0) |
189 | return -EIO; | 201 | return -EIO; |
190 | 202 | ||
191 | if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { | 203 | if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { |
192 | dev_err(chip->pdev, "IBF timeout\n"); | 204 | dev_err(&chip->dev, "IBF timeout\n"); |
193 | return -EIO; | 205 | return -EIO; |
194 | } | 206 | } |
195 | 207 | ||
196 | outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); | 208 | outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND); |
197 | if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { | 209 | if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { |
198 | dev_err(chip->pdev, "IBR timeout\n"); | 210 | dev_err(&chip->dev, "IBR timeout\n"); |
199 | return -EIO; | 211 | return -EIO; |
200 | } | 212 | } |
201 | 213 | ||
202 | for (i = 0; i < count; i++) { | 214 | for (i = 0; i < count; i++) { |
203 | if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { | 215 | if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { |
204 | dev_err(chip->pdev, | 216 | dev_err(&chip->dev, |
205 | "IBF timeout (while writing data)\n"); | 217 | "IBF timeout (while writing data)\n"); |
206 | return -EIO; | 218 | return -EIO; |
207 | } | 219 | } |
208 | outb(buf[i], chip->vendor.base + NSC_DATA); | 220 | outb(buf[i], priv->base + NSC_DATA); |
209 | } | 221 | } |
210 | 222 | ||
211 | if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { | 223 | if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { |
212 | dev_err(chip->pdev, "IBF timeout\n"); | 224 | dev_err(&chip->dev, "IBF timeout\n"); |
213 | return -EIO; | 225 | return -EIO; |
214 | } | 226 | } |
215 | outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); | 227 | outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND); |
216 | 228 | ||
217 | return count; | 229 | return count; |
218 | } | 230 | } |
219 | 231 | ||
220 | static void tpm_nsc_cancel(struct tpm_chip *chip) | 232 | static void tpm_nsc_cancel(struct tpm_chip *chip) |
221 | { | 233 | { |
222 | outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); | 234 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); |
235 | |||
236 | outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND); | ||
223 | } | 237 | } |
224 | 238 | ||
225 | static u8 tpm_nsc_status(struct tpm_chip *chip) | 239 | static u8 tpm_nsc_status(struct tpm_chip *chip) |
226 | { | 240 | { |
227 | return inb(chip->vendor.base + NSC_STATUS); | 241 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); |
242 | |||
243 | return inb(priv->base + NSC_STATUS); | ||
228 | } | 244 | } |
229 | 245 | ||
230 | static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status) | 246 | static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status) |
@@ -247,9 +263,10 @@ static struct platform_device *pdev = NULL; | |||
247 | static void tpm_nsc_remove(struct device *dev) | 263 | static void tpm_nsc_remove(struct device *dev) |
248 | { | 264 | { |
249 | struct tpm_chip *chip = dev_get_drvdata(dev); | 265 | struct tpm_chip *chip = dev_get_drvdata(dev); |
266 | struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); | ||
250 | 267 | ||
251 | tpm_chip_unregister(chip); | 268 | tpm_chip_unregister(chip); |
252 | release_region(chip->vendor.base, 2); | 269 | release_region(priv->base, 2); |
253 | } | 270 | } |
254 | 271 | ||
255 | static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); | 272 | static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); |
@@ -268,6 +285,7 @@ static int __init init_nsc(void) | |||
268 | int nscAddrBase = TPM_ADDR; | 285 | int nscAddrBase = TPM_ADDR; |
269 | struct tpm_chip *chip; | 286 | struct tpm_chip *chip; |
270 | unsigned long base; | 287 | unsigned long base; |
288 | struct tpm_nsc_priv *priv; | ||
271 | 289 | ||
272 | /* verify that it is a National part (SID) */ | 290 | /* verify that it is a National part (SID) */ |
273 | if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { | 291 | if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { |
@@ -301,6 +319,14 @@ static int __init init_nsc(void) | |||
301 | if ((rc = platform_device_add(pdev)) < 0) | 319 | if ((rc = platform_device_add(pdev)) < 0) |
302 | goto err_put_dev; | 320 | goto err_put_dev; |
303 | 321 | ||
322 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | ||
323 | if (!priv) { | ||
324 | rc = -ENOMEM; | ||
325 | goto err_del_dev; | ||
326 | } | ||
327 | |||
328 | priv->base = base; | ||
329 | |||
304 | if (request_region(base, 2, "tpm_nsc0") == NULL ) { | 330 | if (request_region(base, 2, "tpm_nsc0") == NULL ) { |
305 | rc = -EBUSY; | 331 | rc = -EBUSY; |
306 | goto err_del_dev; | 332 | goto err_del_dev; |
@@ -312,6 +338,8 @@ static int __init init_nsc(void) | |||
312 | goto err_rel_reg; | 338 | goto err_rel_reg; |
313 | } | 339 | } |
314 | 340 | ||
341 | dev_set_drvdata(&chip->dev, priv); | ||
342 | |||
315 | rc = tpm_chip_register(chip); | 343 | rc = tpm_chip_register(chip); |
316 | if (rc) | 344 | if (rc) |
317 | goto err_rel_reg; | 345 | goto err_rel_reg; |
@@ -349,8 +377,6 @@ static int __init init_nsc(void) | |||
349 | "NSC TPM revision %d\n", | 377 | "NSC TPM revision %d\n", |
350 | tpm_read_index(nscAddrBase, 0x27) & 0x1F); | 378 | tpm_read_index(nscAddrBase, 0x27) & 0x1F); |
351 | 379 | ||
352 | chip->vendor.base = base; | ||
353 | |||
354 | return 0; | 380 | return 0; |
355 | 381 | ||
356 | err_rel_reg: | 382 | err_rel_reg: |
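The tpm_nsc conversion above replaces the shared chip->vendor.base field with a driver-private struct tpm_nsc_priv kept in the chip device's drvdata: the state is allocated once at init time against the platform device and fetched back with dev_get_drvdata(&chip->dev) in every callback. A minimal sketch of that pattern, assuming the driver-core drvdata API; the function names and the trimmed tpm_chip below are invented for illustration:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct tpm_chip { struct device dev; };         /* trimmed stand-in for tpm.h */

struct tpm_nsc_priv {
        unsigned long base;                     /* replaces chip->vendor.base */
};

/* Allocate driver state against the parent device and hang it off the chip. */
static int example_attach_priv(struct device *parent, struct tpm_chip *chip,
                               unsigned long base)
{
        struct tpm_nsc_priv *priv;

        priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base = base;
        dev_set_drvdata(&chip->dev, priv);
        return 0;
}

/* Every send/recv/status/cancel callback recovers the state the same way. */
static unsigned long example_io_base(struct tpm_chip *chip)
{
        struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);

        return priv->base;      /* callers add NSC_STATUS, NSC_DATA, ... */
}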
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index a507006728e0..eaf5730d79eb 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -29,40 +29,7 @@ | |||
29 | #include <linux/acpi.h> | 29 | #include <linux/acpi.h> |
30 | #include <linux/freezer.h> | 30 | #include <linux/freezer.h> |
31 | #include "tpm.h" | 31 | #include "tpm.h" |
32 | 32 | #include "tpm_tis_core.h" | |
33 | enum tis_access { | ||
34 | TPM_ACCESS_VALID = 0x80, | ||
35 | TPM_ACCESS_ACTIVE_LOCALITY = 0x20, | ||
36 | TPM_ACCESS_REQUEST_PENDING = 0x04, | ||
37 | TPM_ACCESS_REQUEST_USE = 0x02, | ||
38 | }; | ||
39 | |||
40 | enum tis_status { | ||
41 | TPM_STS_VALID = 0x80, | ||
42 | TPM_STS_COMMAND_READY = 0x40, | ||
43 | TPM_STS_GO = 0x20, | ||
44 | TPM_STS_DATA_AVAIL = 0x10, | ||
45 | TPM_STS_DATA_EXPECT = 0x08, | ||
46 | }; | ||
47 | |||
48 | enum tis_int_flags { | ||
49 | TPM_GLOBAL_INT_ENABLE = 0x80000000, | ||
50 | TPM_INTF_BURST_COUNT_STATIC = 0x100, | ||
51 | TPM_INTF_CMD_READY_INT = 0x080, | ||
52 | TPM_INTF_INT_EDGE_FALLING = 0x040, | ||
53 | TPM_INTF_INT_EDGE_RISING = 0x020, | ||
54 | TPM_INTF_INT_LEVEL_LOW = 0x010, | ||
55 | TPM_INTF_INT_LEVEL_HIGH = 0x008, | ||
56 | TPM_INTF_LOCALITY_CHANGE_INT = 0x004, | ||
57 | TPM_INTF_STS_VALID_INT = 0x002, | ||
58 | TPM_INTF_DATA_AVAIL_INT = 0x001, | ||
59 | }; | ||
60 | |||
61 | enum tis_defaults { | ||
62 | TIS_MEM_LEN = 0x5000, | ||
63 | TIS_SHORT_TIMEOUT = 750, /* ms */ | ||
64 | TIS_LONG_TIMEOUT = 2000, /* 2 sec */ | ||
65 | }; | ||
66 | 33 | ||
67 | struct tpm_info { | 34 | struct tpm_info { |
68 | struct resource res; | 35 | struct resource res; |
@@ -73,30 +40,30 @@ struct tpm_info { | |||
73 | int irq; | 40 | int irq; |
74 | }; | 41 | }; |
75 | 42 | ||
76 | /* Some timeout values are needed before it is known whether the chip is | 43 | struct tpm_tis_tcg_phy { |
77 | * TPM 1.0 or TPM 2.0. | 44 | struct tpm_tis_data priv; |
78 | */ | 45 | void __iomem *iobase; |
79 | #define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A) | ||
80 | #define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B) | ||
81 | #define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C) | ||
82 | #define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D) | ||
83 | |||
84 | #define TPM_ACCESS(l) (0x0000 | ((l) << 12)) | ||
85 | #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) | ||
86 | #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) | ||
87 | #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) | ||
88 | #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) | ||
89 | #define TPM_STS(l) (0x0018 | ((l) << 12)) | ||
90 | #define TPM_STS3(l) (0x001b | ((l) << 12)) | ||
91 | #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) | ||
92 | |||
93 | #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) | ||
94 | #define TPM_RID(l) (0x0F04 | ((l) << 12)) | ||
95 | |||
96 | struct priv_data { | ||
97 | bool irq_tested; | ||
98 | }; | 46 | }; |
99 | 47 | ||
48 | static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data) | ||
49 | { | ||
50 | return container_of(data, struct tpm_tis_tcg_phy, priv); | ||
51 | } | ||
52 | |||
53 | static bool interrupts = true; | ||
54 | module_param(interrupts, bool, 0444); | ||
55 | MODULE_PARM_DESC(interrupts, "Enable interrupts"); | ||
56 | |||
57 | static bool itpm; | ||
58 | module_param(itpm, bool, 0444); | ||
59 | MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); | ||
60 | |||
61 | static bool force; | ||
62 | #ifdef CONFIG_X86 | ||
63 | module_param(force, bool, 0444); | ||
64 | MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); | ||
65 | #endif | ||
66 | |||
100 | #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) | 67 | #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) |
101 | static int has_hid(struct acpi_device *dev, const char *hid) | 68 | static int has_hid(struct acpi_device *dev, const char *hid) |
102 | { | 69 | { |
@@ -120,744 +87,82 @@ static inline int is_itpm(struct acpi_device *dev) | |||
120 | } | 87 | } |
121 | #endif | 88 | #endif |
122 | 89 | ||
123 | /* Before we attempt to access the TPM we must see that the valid bit is set. | 90 | static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, |
124 | * The specification says that this bit is 0 at reset and remains 0 until the | 91 | u8 *result) |
125 | * 'TPM has gone through its self test and initialization and has established | ||
126 | * correct values in the other bits.' */ | ||
127 | static int wait_startup(struct tpm_chip *chip, int l) | ||
128 | { | ||
129 | unsigned long stop = jiffies + chip->vendor.timeout_a; | ||
130 | do { | ||
131 | if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & | ||
132 | TPM_ACCESS_VALID) | ||
133 | return 0; | ||
134 | msleep(TPM_TIMEOUT); | ||
135 | } while (time_before(jiffies, stop)); | ||
136 | return -1; | ||
137 | } | ||
138 | |||
139 | static int check_locality(struct tpm_chip *chip, int l) | ||
140 | { | ||
141 | if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & | ||
142 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == | ||
143 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) | ||
144 | return chip->vendor.locality = l; | ||
145 | |||
146 | return -1; | ||
147 | } | ||
148 | |||
149 | static void release_locality(struct tpm_chip *chip, int l, int force) | ||
150 | { | ||
151 | if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & | ||
152 | (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == | ||
153 | (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) | ||
154 | iowrite8(TPM_ACCESS_ACTIVE_LOCALITY, | ||
155 | chip->vendor.iobase + TPM_ACCESS(l)); | ||
156 | } | ||
157 | |||
158 | static int request_locality(struct tpm_chip *chip, int l) | ||
159 | { | ||
160 | unsigned long stop, timeout; | ||
161 | long rc; | ||
162 | |||
163 | if (check_locality(chip, l) >= 0) | ||
164 | return l; | ||
165 | |||
166 | iowrite8(TPM_ACCESS_REQUEST_USE, | ||
167 | chip->vendor.iobase + TPM_ACCESS(l)); | ||
168 | |||
169 | stop = jiffies + chip->vendor.timeout_a; | ||
170 | |||
171 | if (chip->vendor.irq) { | ||
172 | again: | ||
173 | timeout = stop - jiffies; | ||
174 | if ((long)timeout <= 0) | ||
175 | return -1; | ||
176 | rc = wait_event_interruptible_timeout(chip->vendor.int_queue, | ||
177 | (check_locality | ||
178 | (chip, l) >= 0), | ||
179 | timeout); | ||
180 | if (rc > 0) | ||
181 | return l; | ||
182 | if (rc == -ERESTARTSYS && freezing(current)) { | ||
183 | clear_thread_flag(TIF_SIGPENDING); | ||
184 | goto again; | ||
185 | } | ||
186 | } else { | ||
187 | /* wait for burstcount */ | ||
188 | do { | ||
189 | if (check_locality(chip, l) >= 0) | ||
190 | return l; | ||
191 | msleep(TPM_TIMEOUT); | ||
192 | } | ||
193 | while (time_before(jiffies, stop)); | ||
194 | } | ||
195 | return -1; | ||
196 | } | ||
197 | |||
198 | static u8 tpm_tis_status(struct tpm_chip *chip) | ||
199 | { | ||
200 | return ioread8(chip->vendor.iobase + | ||
201 | TPM_STS(chip->vendor.locality)); | ||
202 | } | ||
203 | |||
204 | static void tpm_tis_ready(struct tpm_chip *chip) | ||
205 | { | ||
206 | /* this causes the current command to be aborted */ | ||
207 | iowrite8(TPM_STS_COMMAND_READY, | ||
208 | chip->vendor.iobase + TPM_STS(chip->vendor.locality)); | ||
209 | } | ||
210 | |||
211 | static int get_burstcount(struct tpm_chip *chip) | ||
212 | { | ||
213 | unsigned long stop; | ||
214 | int burstcnt; | ||
215 | |||
216 | /* wait for burstcount */ | ||
217 | /* which timeout value, spec has 2 answers (c & d) */ | ||
218 | stop = jiffies + chip->vendor.timeout_d; | ||
219 | do { | ||
220 | burstcnt = ioread8(chip->vendor.iobase + | ||
221 | TPM_STS(chip->vendor.locality) + 1); | ||
222 | burstcnt += ioread8(chip->vendor.iobase + | ||
223 | TPM_STS(chip->vendor.locality) + | ||
224 | 2) << 8; | ||
225 | if (burstcnt) | ||
226 | return burstcnt; | ||
227 | msleep(TPM_TIMEOUT); | ||
228 | } while (time_before(jiffies, stop)); | ||
229 | return -EBUSY; | ||
230 | } | ||
231 | |||
232 | static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | ||
233 | { | 92 | { |
234 | int size = 0, burstcnt; | 93 | struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); |
235 | while (size < count && | ||
236 | wait_for_tpm_stat(chip, | ||
237 | TPM_STS_DATA_AVAIL | TPM_STS_VALID, | ||
238 | chip->vendor.timeout_c, | ||
239 | &chip->vendor.read_queue, true) | ||
240 | == 0) { | ||
241 | burstcnt = get_burstcount(chip); | ||
242 | for (; burstcnt > 0 && size < count; burstcnt--) | ||
243 | buf[size++] = ioread8(chip->vendor.iobase + | ||
244 | TPM_DATA_FIFO(chip->vendor. | ||
245 | locality)); | ||
246 | } | ||
247 | return size; | ||
248 | } | ||
249 | |||
250 | static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | ||
251 | { | ||
252 | int size = 0; | ||
253 | int expected, status; | ||
254 | |||
255 | if (count < TPM_HEADER_SIZE) { | ||
256 | size = -EIO; | ||
257 | goto out; | ||
258 | } | ||
259 | |||
260 | /* read first 10 bytes, including tag, paramsize, and result */ | ||
261 | if ((size = | ||
262 | recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { | ||
263 | dev_err(chip->pdev, "Unable to read header\n"); | ||
264 | goto out; | ||
265 | } | ||
266 | |||
267 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | ||
268 | if (expected > count) { | ||
269 | size = -EIO; | ||
270 | goto out; | ||
271 | } | ||
272 | |||
273 | if ((size += | ||
274 | recv_data(chip, &buf[TPM_HEADER_SIZE], | ||
275 | expected - TPM_HEADER_SIZE)) < expected) { | ||
276 | dev_err(chip->pdev, "Unable to read remainder of result\n"); | ||
277 | size = -ETIME; | ||
278 | goto out; | ||
279 | } | ||
280 | |||
281 | wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, | ||
282 | &chip->vendor.int_queue, false); | ||
283 | status = tpm_tis_status(chip); | ||
284 | if (status & TPM_STS_DATA_AVAIL) { /* retry? */ | ||
285 | dev_err(chip->pdev, "Error left over data\n"); | ||
286 | size = -EIO; | ||
287 | goto out; | ||
288 | } | ||
289 | |||
290 | out: | ||
291 | tpm_tis_ready(chip); | ||
292 | release_locality(chip, chip->vendor.locality, 0); | ||
293 | return size; | ||
294 | } | ||
295 | |||
296 | static bool itpm; | ||
297 | module_param(itpm, bool, 0444); | ||
298 | MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); | ||
299 | |||
300 | /* | ||
301 | * If interrupts are used (signaled by an irq set in the vendor structure) | ||
302 | * tpm.c can skip polling for the data to be available as the interrupt is | ||
303 | * waited for here | ||
304 | */ | ||
305 | static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) | ||
306 | { | ||
307 | int rc, status, burstcnt; | ||
308 | size_t count = 0; | ||
309 | |||
310 | if (request_locality(chip, 0) < 0) | ||
311 | return -EBUSY; | ||
312 | |||
313 | status = tpm_tis_status(chip); | ||
314 | if ((status & TPM_STS_COMMAND_READY) == 0) { | ||
315 | tpm_tis_ready(chip); | ||
316 | if (wait_for_tpm_stat | ||
317 | (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, | ||
318 | &chip->vendor.int_queue, false) < 0) { | ||
319 | rc = -ETIME; | ||
320 | goto out_err; | ||
321 | } | ||
322 | } | ||
323 | |||
324 | while (count < len - 1) { | ||
325 | burstcnt = get_burstcount(chip); | ||
326 | for (; burstcnt > 0 && count < len - 1; burstcnt--) { | ||
327 | iowrite8(buf[count], chip->vendor.iobase + | ||
328 | TPM_DATA_FIFO(chip->vendor.locality)); | ||
329 | count++; | ||
330 | } | ||
331 | |||
332 | wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, | ||
333 | &chip->vendor.int_queue, false); | ||
334 | status = tpm_tis_status(chip); | ||
335 | if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { | ||
336 | rc = -EIO; | ||
337 | goto out_err; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | /* write last byte */ | ||
342 | iowrite8(buf[count], | ||
343 | chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); | ||
344 | wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, | ||
345 | &chip->vendor.int_queue, false); | ||
346 | status = tpm_tis_status(chip); | ||
347 | if ((status & TPM_STS_DATA_EXPECT) != 0) { | ||
348 | rc = -EIO; | ||
349 | goto out_err; | ||
350 | } | ||
351 | 94 | ||
95 | while (len--) | ||
96 | *result++ = ioread8(phy->iobase + addr); | ||
352 | return 0; | 97 | return 0; |
353 | |||
354 | out_err: | ||
355 | tpm_tis_ready(chip); | ||
356 | release_locality(chip, chip->vendor.locality, 0); | ||
357 | return rc; | ||
358 | } | ||
359 | |||
360 | static void disable_interrupts(struct tpm_chip *chip) | ||
361 | { | ||
362 | u32 intmask; | ||
363 | |||
364 | intmask = | ||
365 | ioread32(chip->vendor.iobase + | ||
366 | TPM_INT_ENABLE(chip->vendor.locality)); | ||
367 | intmask &= ~TPM_GLOBAL_INT_ENABLE; | ||
368 | iowrite32(intmask, | ||
369 | chip->vendor.iobase + | ||
370 | TPM_INT_ENABLE(chip->vendor.locality)); | ||
371 | devm_free_irq(chip->pdev, chip->vendor.irq, chip); | ||
372 | chip->vendor.irq = 0; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * If interrupts are used (signaled by an irq set in the vendor structure) | ||
377 | * tpm.c can skip polling for the data to be available as the interrupt is | ||
378 | * waited for here | ||
379 | */ | ||
380 | static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) | ||
381 | { | ||
382 | int rc; | ||
383 | u32 ordinal; | ||
384 | unsigned long dur; | ||
385 | |||
386 | rc = tpm_tis_send_data(chip, buf, len); | ||
387 | if (rc < 0) | ||
388 | return rc; | ||
389 | |||
390 | /* go and do it */ | ||
391 | iowrite8(TPM_STS_GO, | ||
392 | chip->vendor.iobase + TPM_STS(chip->vendor.locality)); | ||
393 | |||
394 | if (chip->vendor.irq) { | ||
395 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); | ||
396 | |||
397 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
398 | dur = tpm2_calc_ordinal_duration(chip, ordinal); | ||
399 | else | ||
400 | dur = tpm_calc_ordinal_duration(chip, ordinal); | ||
401 | |||
402 | if (wait_for_tpm_stat | ||
403 | (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, | ||
404 | &chip->vendor.read_queue, false) < 0) { | ||
405 | rc = -ETIME; | ||
406 | goto out_err; | ||
407 | } | ||
408 | } | ||
409 | return len; | ||
410 | out_err: | ||
411 | tpm_tis_ready(chip); | ||
412 | release_locality(chip, chip->vendor.locality, 0); | ||
413 | return rc; | ||
414 | } | ||
415 | |||
416 | static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) | ||
417 | { | ||
418 | int rc, irq; | ||
419 | struct priv_data *priv = chip->vendor.priv; | ||
420 | |||
421 | if (!chip->vendor.irq || priv->irq_tested) | ||
422 | return tpm_tis_send_main(chip, buf, len); | ||
423 | |||
424 | /* Verify receipt of the expected IRQ */ | ||
425 | irq = chip->vendor.irq; | ||
426 | chip->vendor.irq = 0; | ||
427 | rc = tpm_tis_send_main(chip, buf, len); | ||
428 | chip->vendor.irq = irq; | ||
429 | if (!priv->irq_tested) | ||
430 | msleep(1); | ||
431 | if (!priv->irq_tested) | ||
432 | disable_interrupts(chip); | ||
433 | priv->irq_tested = true; | ||
434 | return rc; | ||
435 | } | 98 | } |
436 | 99 | ||
437 | struct tis_vendor_timeout_override { | 100 | static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, |
438 | u32 did_vid; | 101 | u8 *value) |
439 | unsigned long timeout_us[4]; | ||
440 | }; | ||
441 | |||
442 | static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { | ||
443 | /* Atmel 3204 */ | ||
444 | { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), | ||
445 | (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, | ||
446 | }; | ||
447 | |||
448 | static bool tpm_tis_update_timeouts(struct tpm_chip *chip, | ||
449 | unsigned long *timeout_cap) | ||
450 | { | 102 | { |
451 | int i; | 103 | struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); |
452 | u32 did_vid; | ||
453 | 104 | ||
454 | did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); | 105 | while (len--) |
455 | 106 | iowrite8(*value++, phy->iobase + addr); | |
456 | for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { | 107 | return 0; |
457 | if (vendor_timeout_overrides[i].did_vid != did_vid) | ||
458 | continue; | ||
459 | memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, | ||
460 | sizeof(vendor_timeout_overrides[i].timeout_us)); | ||
461 | return true; | ||
462 | } | ||
463 | |||
464 | return false; | ||
465 | } | 108 | } |
466 | 109 | ||
467 | /* | 110 | static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result) |
468 | * Early probing for iTPM with STS_DATA_EXPECT flaw. | ||
469 | * Try sending command without itpm flag set and if that | ||
470 | * fails, repeat with itpm flag set. | ||
471 | */ | ||
472 | static int probe_itpm(struct tpm_chip *chip) | ||
473 | { | 111 | { |
474 | int rc = 0; | 112 | struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); |
475 | u8 cmd_getticks[] = { | ||
476 | 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, | ||
477 | 0x00, 0x00, 0x00, 0xf1 | ||
478 | }; | ||
479 | size_t len = sizeof(cmd_getticks); | ||
480 | bool rem_itpm = itpm; | ||
481 | u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0)); | ||
482 | |||
483 | /* probe only iTPMS */ | ||
484 | if (vendor != TPM_VID_INTEL) | ||
485 | return 0; | ||
486 | |||
487 | itpm = false; | ||
488 | |||
489 | rc = tpm_tis_send_data(chip, cmd_getticks, len); | ||
490 | if (rc == 0) | ||
491 | goto out; | ||
492 | |||
493 | tpm_tis_ready(chip); | ||
494 | release_locality(chip, chip->vendor.locality, 0); | ||
495 | 113 | ||
496 | itpm = true; | 114 | *result = ioread16(phy->iobase + addr); |
497 | 115 | return 0; | |
498 | rc = tpm_tis_send_data(chip, cmd_getticks, len); | ||
499 | if (rc == 0) { | ||
500 | dev_info(chip->pdev, "Detected an iTPM.\n"); | ||
501 | rc = 1; | ||
502 | } else | ||
503 | rc = -EFAULT; | ||
504 | |||
505 | out: | ||
506 | itpm = rem_itpm; | ||
507 | tpm_tis_ready(chip); | ||
508 | release_locality(chip, chip->vendor.locality, 0); | ||
509 | |||
510 | return rc; | ||
511 | } | 116 | } |
512 | 117 | ||
513 | static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) | 118 | static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result) |
514 | { | 119 | { |
515 | switch (chip->vendor.manufacturer_id) { | 120 | struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); |
516 | case TPM_VID_WINBOND: | ||
517 | return ((status == TPM_STS_VALID) || | ||
518 | (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); | ||
519 | case TPM_VID_STM: | ||
520 | return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); | ||
521 | default: | ||
522 | return (status == TPM_STS_COMMAND_READY); | ||
523 | } | ||
524 | } | ||
525 | |||
526 | static const struct tpm_class_ops tpm_tis = { | ||
527 | .status = tpm_tis_status, | ||
528 | .recv = tpm_tis_recv, | ||
529 | .send = tpm_tis_send, | ||
530 | .cancel = tpm_tis_ready, | ||
531 | .update_timeouts = tpm_tis_update_timeouts, | ||
532 | .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, | ||
533 | .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, | ||
534 | .req_canceled = tpm_tis_req_canceled, | ||
535 | }; | ||
536 | 121 | ||
537 | static irqreturn_t tis_int_handler(int dummy, void *dev_id) | 122 | *result = ioread32(phy->iobase + addr); |
538 | { | 123 | return 0; |
539 | struct tpm_chip *chip = dev_id; | ||
540 | u32 interrupt; | ||
541 | int i; | ||
542 | |||
543 | interrupt = ioread32(chip->vendor.iobase + | ||
544 | TPM_INT_STATUS(chip->vendor.locality)); | ||
545 | |||
546 | if (interrupt == 0) | ||
547 | return IRQ_NONE; | ||
548 | |||
549 | ((struct priv_data *)chip->vendor.priv)->irq_tested = true; | ||
550 | if (interrupt & TPM_INTF_DATA_AVAIL_INT) | ||
551 | wake_up_interruptible(&chip->vendor.read_queue); | ||
552 | if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) | ||
553 | for (i = 0; i < 5; i++) | ||
554 | if (check_locality(chip, i) >= 0) | ||
555 | break; | ||
556 | if (interrupt & | ||
557 | (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | | ||
558 | TPM_INTF_CMD_READY_INT)) | ||
559 | wake_up_interruptible(&chip->vendor.int_queue); | ||
560 | |||
561 | /* Clear interrupts handled with TPM_EOI */ | ||
562 | iowrite32(interrupt, | ||
563 | chip->vendor.iobase + | ||
564 | TPM_INT_STATUS(chip->vendor.locality)); | ||
565 | ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); | ||
566 | return IRQ_HANDLED; | ||
567 | } | 124 | } |
568 | 125 | ||
569 | /* Register the IRQ and issue a command that will cause an interrupt. If an | 126 | static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) |
570 | * irq is seen then leave the chip setup for IRQ operation, otherwise reverse | ||
571 | * everything and leave in polling mode. Returns 0 on success. | ||
572 | */ | ||
573 | static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, | ||
574 | int flags, int irq) | ||
575 | { | 127 | { |
576 | struct priv_data *priv = chip->vendor.priv; | 128 | struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); |
577 | u8 original_int_vec; | ||
578 | |||
579 | if (devm_request_irq(chip->pdev, irq, tis_int_handler, flags, | ||
580 | chip->devname, chip) != 0) { | ||
581 | dev_info(chip->pdev, "Unable to request irq: %d for probe\n", | ||
582 | irq); | ||
583 | return -1; | ||
584 | } | ||
585 | chip->vendor.irq = irq; | ||
586 | |||
587 | original_int_vec = ioread8(chip->vendor.iobase + | ||
588 | TPM_INT_VECTOR(chip->vendor.locality)); | ||
589 | iowrite8(irq, | ||
590 | chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); | ||
591 | |||
592 | /* Clear all existing */ | ||
593 | iowrite32(ioread32(chip->vendor.iobase + | ||
594 | TPM_INT_STATUS(chip->vendor.locality)), | ||
595 | chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); | ||
596 | |||
597 | /* Turn on */ | ||
598 | iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, | ||
599 | chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); | ||
600 | |||
601 | priv->irq_tested = false; | ||
602 | |||
603 | /* Generate an interrupt by having the core call through to | ||
604 | * tpm_tis_send | ||
605 | */ | ||
606 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
607 | tpm2_gen_interrupt(chip); | ||
608 | else | ||
609 | tpm_gen_interrupt(chip); | ||
610 | |||
611 | /* tpm_tis_send will either confirm the interrupt is working or it | ||
612 | * will call disable_irq which undoes all of the above. | ||
613 | */ | ||
614 | if (!chip->vendor.irq) { | ||
615 | iowrite8(original_int_vec, | ||
616 | chip->vendor.iobase + | ||
617 | TPM_INT_VECTOR(chip->vendor.locality)); | ||
618 | return 1; | ||
619 | } | ||
620 | 129 | ||
130 | iowrite32(value, phy->iobase + addr); | ||
621 | return 0; | 131 | return 0; |
622 | } | 132 | } |
623 | 133 | ||
624 | /* Try to find the IRQ the TPM is using. This is for legacy x86 systems that | 134 | static const struct tpm_tis_phy_ops tpm_tcg = { |
625 | * do not have ACPI/etc. We typically expect the interrupt to be declared if | 135 | .read_bytes = tpm_tcg_read_bytes, |
626 | * present. | 136 | .write_bytes = tpm_tcg_write_bytes, |
627 | */ | 137 | .read16 = tpm_tcg_read16, |
628 | static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask) | 138 | .read32 = tpm_tcg_read32, |
629 | { | 139 | .write32 = tpm_tcg_write32, |
630 | u8 original_int_vec; | 140 | }; |
631 | int i; | ||
632 | |||
633 | original_int_vec = ioread8(chip->vendor.iobase + | ||
634 | TPM_INT_VECTOR(chip->vendor.locality)); | ||
635 | |||
636 | if (!original_int_vec) { | ||
637 | if (IS_ENABLED(CONFIG_X86)) | ||
638 | for (i = 3; i <= 15; i++) | ||
639 | if (!tpm_tis_probe_irq_single(chip, intmask, 0, | ||
640 | i)) | ||
641 | return; | ||
642 | } else if (!tpm_tis_probe_irq_single(chip, intmask, 0, | ||
643 | original_int_vec)) | ||
644 | return; | ||
645 | } | ||
646 | |||
647 | static bool interrupts = true; | ||
648 | module_param(interrupts, bool, 0444); | ||
649 | MODULE_PARM_DESC(interrupts, "Enable interrupts"); | ||
650 | |||
651 | static void tpm_tis_remove(struct tpm_chip *chip) | ||
652 | { | ||
653 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
654 | tpm2_shutdown(chip, TPM2_SU_CLEAR); | ||
655 | |||
656 | iowrite32(~TPM_GLOBAL_INT_ENABLE & | ||
657 | ioread32(chip->vendor.iobase + | ||
658 | TPM_INT_ENABLE(chip->vendor. | ||
659 | locality)), | ||
660 | chip->vendor.iobase + | ||
661 | TPM_INT_ENABLE(chip->vendor.locality)); | ||
662 | release_locality(chip, chip->vendor.locality, 1); | ||
663 | } | ||
664 | 141 | ||
665 | static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, | 142 | static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, |
666 | acpi_handle acpi_dev_handle) | 143 | acpi_handle acpi_dev_handle) |
667 | { | 144 | { |
668 | u32 vendor, intfcaps, intmask; | 145 | struct tpm_tis_tcg_phy *phy; |
669 | int rc, probe; | 146 | int irq = -1; |
670 | struct tpm_chip *chip; | ||
671 | struct priv_data *priv; | ||
672 | 147 | ||
673 | priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); | 148 | phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL); |
674 | if (priv == NULL) | 149 | if (phy == NULL) |
675 | return -ENOMEM; | 150 | return -ENOMEM; |
676 | 151 | ||
677 | chip = tpmm_chip_alloc(dev, &tpm_tis); | 152 | phy->iobase = devm_ioremap_resource(dev, &tpm_info->res); |
678 | if (IS_ERR(chip)) | 153 | if (IS_ERR(phy->iobase)) |
679 | return PTR_ERR(chip); | 154 | return PTR_ERR(phy->iobase); |
680 | |||
681 | chip->vendor.priv = priv; | ||
682 | #ifdef CONFIG_ACPI | ||
683 | chip->acpi_dev_handle = acpi_dev_handle; | ||
684 | #endif | ||
685 | 155 | ||
686 | chip->vendor.iobase = devm_ioremap_resource(dev, &tpm_info->res); | 156 | if (interrupts) |
687 | if (IS_ERR(chip->vendor.iobase)) | 157 | irq = tpm_info->irq; |
688 | return PTR_ERR(chip->vendor.iobase); | ||
689 | |||
690 | /* Maximum timeouts */ | ||
691 | chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX; | ||
692 | chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX; | ||
693 | chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX; | ||
694 | chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX; | ||
695 | |||
696 | if (wait_startup(chip, 0) != 0) { | ||
697 | rc = -ENODEV; | ||
698 | goto out_err; | ||
699 | } | ||
700 | |||
701 | /* Take control of the TPM's interrupt hardware and shut it off */ | ||
702 | intmask = ioread32(chip->vendor.iobase + | ||
703 | TPM_INT_ENABLE(chip->vendor.locality)); | ||
704 | intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | | ||
705 | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; | ||
706 | intmask &= ~TPM_GLOBAL_INT_ENABLE; | ||
707 | iowrite32(intmask, | ||
708 | chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); | ||
709 | |||
710 | if (request_locality(chip, 0) != 0) { | ||
711 | rc = -ENODEV; | ||
712 | goto out_err; | ||
713 | } | ||
714 | |||
715 | rc = tpm2_probe(chip); | ||
716 | if (rc) | ||
717 | goto out_err; | ||
718 | |||
719 | vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); | ||
720 | chip->vendor.manufacturer_id = vendor; | ||
721 | |||
722 | dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", | ||
723 | (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2", | ||
724 | vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); | ||
725 | |||
726 | if (!itpm) { | ||
727 | probe = probe_itpm(chip); | ||
728 | if (probe < 0) { | ||
729 | rc = -ENODEV; | ||
730 | goto out_err; | ||
731 | } | ||
732 | itpm = !!probe; | ||
733 | } | ||
734 | 158 | ||
735 | if (itpm) | 159 | if (itpm) |
736 | dev_info(dev, "Intel iTPM workaround enabled\n"); | 160 | phy->priv.flags |= TPM_TIS_ITPM_POSSIBLE; |
737 | |||
738 | |||
739 | /* Figure out the capabilities */ | ||
740 | intfcaps = | ||
741 | ioread32(chip->vendor.iobase + | ||
742 | TPM_INTF_CAPS(chip->vendor.locality)); | ||
743 | dev_dbg(dev, "TPM interface capabilities (0x%x):\n", | ||
744 | intfcaps); | ||
745 | if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) | ||
746 | dev_dbg(dev, "\tBurst Count Static\n"); | ||
747 | if (intfcaps & TPM_INTF_CMD_READY_INT) | ||
748 | dev_dbg(dev, "\tCommand Ready Int Support\n"); | ||
749 | if (intfcaps & TPM_INTF_INT_EDGE_FALLING) | ||
750 | dev_dbg(dev, "\tInterrupt Edge Falling\n"); | ||
751 | if (intfcaps & TPM_INTF_INT_EDGE_RISING) | ||
752 | dev_dbg(dev, "\tInterrupt Edge Rising\n"); | ||
753 | if (intfcaps & TPM_INTF_INT_LEVEL_LOW) | ||
754 | dev_dbg(dev, "\tInterrupt Level Low\n"); | ||
755 | if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) | ||
756 | dev_dbg(dev, "\tInterrupt Level High\n"); | ||
757 | if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) | ||
758 | dev_dbg(dev, "\tLocality Change Int Support\n"); | ||
759 | if (intfcaps & TPM_INTF_STS_VALID_INT) | ||
760 | dev_dbg(dev, "\tSts Valid Int Support\n"); | ||
761 | if (intfcaps & TPM_INTF_DATA_AVAIL_INT) | ||
762 | dev_dbg(dev, "\tData Avail Int Support\n"); | ||
763 | |||
764 | /* Very early on issue a command to the TPM in polling mode to make | ||
765 | * sure it works. May as well use that command to set the proper | ||
766 | * timeouts for the driver. | ||
767 | */ | ||
768 | if (tpm_get_timeouts(chip)) { | ||
769 | dev_err(dev, "Could not get TPM timeouts and durations\n"); | ||
770 | rc = -ENODEV; | ||
771 | goto out_err; | ||
772 | } | ||
773 | |||
774 | /* INTERRUPT Setup */ | ||
775 | init_waitqueue_head(&chip->vendor.read_queue); | ||
776 | init_waitqueue_head(&chip->vendor.int_queue); | ||
777 | if (interrupts && tpm_info->irq != -1) { | ||
778 | if (tpm_info->irq) { | ||
779 | tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, | ||
780 | tpm_info->irq); | ||
781 | if (!chip->vendor.irq) | ||
782 | dev_err(chip->pdev, FW_BUG | ||
783 | "TPM interrupt not working, polling instead\n"); | ||
784 | } else | ||
785 | tpm_tis_probe_irq(chip, intmask); | ||
786 | } | ||
787 | 161 | ||
788 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { | 162 | return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg, |
789 | rc = tpm2_do_selftest(chip); | 163 | acpi_dev_handle); |
790 | if (rc == TPM2_RC_INITIALIZE) { | ||
791 | dev_warn(dev, "Firmware has not started TPM\n"); | ||
792 | rc = tpm2_startup(chip, TPM2_SU_CLEAR); | ||
793 | if (!rc) | ||
794 | rc = tpm2_do_selftest(chip); | ||
795 | } | ||
796 | |||
797 | if (rc) { | ||
798 | dev_err(dev, "TPM self test failed\n"); | ||
799 | if (rc > 0) | ||
800 | rc = -ENODEV; | ||
801 | goto out_err; | ||
802 | } | ||
803 | } else { | ||
804 | if (tpm_do_selftest(chip)) { | ||
805 | dev_err(dev, "TPM self test failed\n"); | ||
806 | rc = -ENODEV; | ||
807 | goto out_err; | ||
808 | } | ||
809 | } | ||
810 | |||
811 | return tpm_chip_register(chip); | ||
812 | out_err: | ||
813 | tpm_tis_remove(chip); | ||
814 | return rc; | ||
815 | } | 164 | } |
816 | 165 | ||
817 | #ifdef CONFIG_PM_SLEEP | ||
818 | static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) | ||
819 | { | ||
820 | u32 intmask; | ||
821 | |||
822 | /* reenable interrupts that device may have lost or | ||
823 | BIOS/firmware may have disabled */ | ||
824 | iowrite8(chip->vendor.irq, chip->vendor.iobase + | ||
825 | TPM_INT_VECTOR(chip->vendor.locality)); | ||
826 | |||
827 | intmask = | ||
828 | ioread32(chip->vendor.iobase + | ||
829 | TPM_INT_ENABLE(chip->vendor.locality)); | ||
830 | |||
831 | intmask |= TPM_INTF_CMD_READY_INT | ||
832 | | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | ||
833 | | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; | ||
834 | |||
835 | iowrite32(intmask, | ||
836 | chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); | ||
837 | } | ||
838 | |||
839 | static int tpm_tis_resume(struct device *dev) | ||
840 | { | ||
841 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
842 | int ret; | ||
843 | |||
844 | if (chip->vendor.irq) | ||
845 | tpm_tis_reenable_interrupts(chip); | ||
846 | |||
847 | ret = tpm_pm_resume(dev); | ||
848 | if (ret) | ||
849 | return ret; | ||
850 | |||
851 | /* TPM 1.2 requires self-test on resume. This function actually returns | ||
852 | * an error code but for unknown reason it isn't handled. | ||
853 | */ | ||
854 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) | ||
855 | tpm_do_selftest(chip); | ||
856 | |||
857 | return 0; | ||
858 | } | ||
859 | #endif | ||
860 | |||
861 | static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); | 166 | static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); |
862 | 167 | ||
863 | static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, | 168 | static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, |
@@ -1058,12 +363,6 @@ static struct platform_driver tis_drv = { | |||
1058 | }, | 363 | }, |
1059 | }; | 364 | }; |
1060 | 365 | ||
1061 | static bool force; | ||
1062 | #ifdef CONFIG_X86 | ||
1063 | module_param(force, bool, 0444); | ||
1064 | MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); | ||
1065 | #endif | ||
1066 | |||
1067 | static int tpm_tis_force_device(void) | 366 | static int tpm_tis_force_device(void) |
1068 | { | 367 | { |
1069 | struct platform_device *pdev; | 368 | struct platform_device *pdev; |
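The tpm_tis.c rewrite above splits the driver into a transport-agnostic core and a thin memory-mapped phy: the phy embeds struct tpm_tis_data, implements the five tpm_tis_phy_ops accessors, and hands everything to tpm_tis_core_init(). Below is a hypothetical buffer-backed phy sketched against the same interface; only the ops signatures and the init call mirror the hunk above, while the buffer transport and all buf_*/tpm_buf_ops names are invented for illustration:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "tpm_tis_core.h"

struct tpm_tis_buf_phy {
        struct tpm_tis_data priv;       /* the core only ever sees this member */
        u8 *regs;                       /* fake, CPU-addressable register window */
};

static inline struct tpm_tis_buf_phy *to_buf_phy(struct tpm_tis_data *data)
{
        return container_of(data, struct tpm_tis_buf_phy, priv);
}

static int buf_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
                          u8 *result)
{
        memcpy(result, to_buf_phy(data)->regs + addr, len);
        return 0;
}

static int buf_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
                           u8 *value)
{
        memcpy(to_buf_phy(data)->regs + addr, value, len);
        return 0;
}

/* A real transport must return register values in CPU byte order, as the
 * ioread16/ioread32 based tpm_tcg callbacks above do; this sketch assumes a
 * little-endian host for brevity. */
static int buf_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
        return buf_read_bytes(data, addr, 2, (u8 *)result);
}

static int buf_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
        return buf_read_bytes(data, addr, 4, (u8 *)result);
}

static int buf_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
        return buf_write_bytes(data, addr, 4, (u8 *)&value);
}

static const struct tpm_tis_phy_ops tpm_buf_ops = {
        .read_bytes     = buf_read_bytes,
        .write_bytes    = buf_write_bytes,
        .read16         = buf_read16,
        .read32         = buf_read32,
        .write32        = buf_write32,
};

/* Probe code would then register much as tpm_tis.c does above:
 *      return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_buf_ops, handle);
 */

Because the core addresses the phy solely through the embedded tpm_tis_data, supporting another bus reduces largely to supplying a different ops table plus whatever setup that bus needs.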
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c new file mode 100644 index 000000000000..d66f51b3648e --- /dev/null +++ b/drivers/char/tpm/tpm_tis_core.c | |||
@@ -0,0 +1,835 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005, 2006 IBM Corporation | ||
3 | * Copyright (C) 2014, 2015 Intel Corporation | ||
4 | * | ||
5 | * Authors: | ||
6 | * Leendert van Doorn <leendert@watson.ibm.com> | ||
7 | * Kylene Hall <kjhall@us.ibm.com> | ||
8 | * | ||
9 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> | ||
10 | * | ||
11 | * Device driver for TCG/TCPA TPM (trusted platform module). | ||
12 | * Specifications at www.trustedcomputinggroup.org | ||
13 | * | ||
14 | * This device driver implements the TPM interface as defined in | ||
15 | * the TCG TPM Interface Spec version 1.2, revision 1.0. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License as | ||
19 | * published by the Free Software Foundation, version 2 of the | ||
20 | * License. | ||
21 | */ | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/pnp.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/wait.h> | ||
29 | #include <linux/acpi.h> | ||
30 | #include <linux/freezer.h> | ||
31 | #include "tpm.h" | ||
32 | #include "tpm_tis_core.h" | ||
33 | |||
34 | /* Before we attempt to access the TPM we must see that the valid bit is set. | ||
35 | * The specification says that this bit is 0 at reset and remains 0 until the | ||
36 | * 'TPM has gone through its self test and initialization and has established | ||
37 | * correct values in the other bits.' | ||
38 | */ | ||
39 | static int wait_startup(struct tpm_chip *chip, int l) | ||
40 | { | ||
41 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
42 | unsigned long stop = jiffies + chip->timeout_a; | ||
43 | |||
44 | do { | ||
45 | int rc; | ||
46 | u8 access; | ||
47 | |||
48 | rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); | ||
49 | if (rc < 0) | ||
50 | return rc; | ||
51 | |||
52 | if (access & TPM_ACCESS_VALID) | ||
53 | return 0; | ||
54 | msleep(TPM_TIMEOUT); | ||
55 | } while (time_before(jiffies, stop)); | ||
56 | return -1; | ||
57 | } | ||
58 | |||
59 | static int check_locality(struct tpm_chip *chip, int l) | ||
60 | { | ||
61 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
62 | int rc; | ||
63 | u8 access; | ||
64 | |||
65 | rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); | ||
66 | if (rc < 0) | ||
67 | return rc; | ||
68 | |||
69 | if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == | ||
70 | (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) | ||
71 | return priv->locality = l; | ||
72 | |||
73 | return -1; | ||
74 | } | ||
75 | |||
76 | static void release_locality(struct tpm_chip *chip, int l, int force) | ||
77 | { | ||
78 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
79 | int rc; | ||
80 | u8 access; | ||
81 | |||
82 | rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); | ||
83 | if (rc < 0) | ||
84 | return; | ||
85 | |||
86 | if (force || (access & | ||
87 | (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == | ||
88 | (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) | ||
89 | tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); | ||
90 | |||
91 | } | ||
92 | |||
93 | static int request_locality(struct tpm_chip *chip, int l) | ||
94 | { | ||
95 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
96 | unsigned long stop, timeout; | ||
97 | long rc; | ||
98 | |||
99 | if (check_locality(chip, l) >= 0) | ||
100 | return l; | ||
101 | |||
102 | rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE); | ||
103 | if (rc < 0) | ||
104 | return rc; | ||
105 | |||
106 | stop = jiffies + chip->timeout_a; | ||
107 | |||
108 | if (chip->flags & TPM_CHIP_FLAG_IRQ) { | ||
109 | again: | ||
110 | timeout = stop - jiffies; | ||
111 | if ((long)timeout <= 0) | ||
112 | return -1; | ||
113 | rc = wait_event_interruptible_timeout(priv->int_queue, | ||
114 | (check_locality | ||
115 | (chip, l) >= 0), | ||
116 | timeout); | ||
117 | if (rc > 0) | ||
118 | return l; | ||
119 | if (rc == -ERESTARTSYS && freezing(current)) { | ||
120 | clear_thread_flag(TIF_SIGPENDING); | ||
121 | goto again; | ||
122 | } | ||
123 | } else { | ||
124 | /* wait for burstcount */ | ||
125 | do { | ||
126 | if (check_locality(chip, l) >= 0) | ||
127 | return l; | ||
128 | msleep(TPM_TIMEOUT); | ||
129 | } while (time_before(jiffies, stop)); | ||
130 | } | ||
131 | return -1; | ||
132 | } | ||
133 | |||
134 | static u8 tpm_tis_status(struct tpm_chip *chip) | ||
135 | { | ||
136 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
137 | int rc; | ||
138 | u8 status; | ||
139 | |||
140 | rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status); | ||
141 | if (rc < 0) | ||
142 | return 0; | ||
143 | |||
144 | return status; | ||
145 | } | ||
146 | |||
147 | static void tpm_tis_ready(struct tpm_chip *chip) | ||
148 | { | ||
149 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
150 | |||
151 | /* this causes the current command to be aborted */ | ||
152 | tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY); | ||
153 | } | ||
154 | |||
155 | static int get_burstcount(struct tpm_chip *chip) | ||
156 | { | ||
157 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
158 | unsigned long stop; | ||
159 | int burstcnt, rc; | ||
160 | u32 value; | ||
161 | |||
162 | /* wait for burstcount */ | ||
163 | /* which timeout value, spec has 2 answers (c & d) */ | ||
164 | stop = jiffies + chip->timeout_d; | ||
165 | do { | ||
166 | rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value); | ||
167 | if (rc < 0) | ||
168 | return rc; | ||
169 | |||
170 | burstcnt = (value >> 8) & 0xFFFF; | ||
171 | if (burstcnt) | ||
172 | return burstcnt; | ||
173 | msleep(TPM_TIMEOUT); | ||
174 | } while (time_before(jiffies, stop)); | ||
175 | return -EBUSY; | ||
176 | } | ||
177 | |||
178 | static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | ||
179 | { | ||
180 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
181 | int size = 0, burstcnt, rc; | ||
182 | |||
183 | while (size < count && | ||
184 | wait_for_tpm_stat(chip, | ||
185 | TPM_STS_DATA_AVAIL | TPM_STS_VALID, | ||
186 | chip->timeout_c, | ||
187 | &priv->read_queue, true) == 0) { | ||
188 | burstcnt = min_t(int, get_burstcount(chip), count - size); | ||
189 | |||
190 | rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality), | ||
191 | burstcnt, buf + size); | ||
192 | if (rc < 0) | ||
193 | return rc; | ||
194 | |||
195 | size += burstcnt; | ||
196 | } | ||
197 | return size; | ||
198 | } | ||
199 | |||
200 | static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | ||
201 | { | ||
202 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
203 | int size = 0; | ||
204 | int expected, status; | ||
205 | |||
206 | if (count < TPM_HEADER_SIZE) { | ||
207 | size = -EIO; | ||
208 | goto out; | ||
209 | } | ||
210 | |||
211 | size = recv_data(chip, buf, TPM_HEADER_SIZE); | ||
212 | /* read first 10 bytes, including tag, paramsize, and result */ | ||
213 | if (size < TPM_HEADER_SIZE) { | ||
214 | dev_err(&chip->dev, "Unable to read header\n"); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | ||
219 | if (expected > count) { | ||
220 | size = -EIO; | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | size += recv_data(chip, &buf[TPM_HEADER_SIZE], | ||
225 | expected - TPM_HEADER_SIZE); | ||
226 | if (size < expected) { | ||
227 | dev_err(&chip->dev, "Unable to read remainder of result\n"); | ||
228 | size = -ETIME; | ||
229 | goto out; | ||
230 | } | ||
231 | |||
232 | wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, | ||
233 | &priv->int_queue, false); | ||
234 | status = tpm_tis_status(chip); | ||
235 | if (status & TPM_STS_DATA_AVAIL) { /* retry? */ | ||
236 | dev_err(&chip->dev, "Error left over data\n"); | ||
237 | size = -EIO; | ||
238 | goto out; | ||
239 | } | ||
240 | |||
241 | out: | ||
242 | tpm_tis_ready(chip); | ||
243 | release_locality(chip, priv->locality, 0); | ||
244 | return size; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * If interrupts are used (signaled by an irq set in the vendor structure) | ||
249 | * tpm.c can skip polling for the data to be available as the interrupt is | ||
250 | * waited for here | ||
251 | */ | ||
252 | static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) | ||
253 | { | ||
254 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
255 | int rc, status, burstcnt; | ||
256 | size_t count = 0; | ||
257 | bool itpm = priv->flags & TPM_TIS_ITPM_POSSIBLE; | ||
258 | |||
259 | if (request_locality(chip, 0) < 0) | ||
260 | return -EBUSY; | ||
261 | |||
262 | status = tpm_tis_status(chip); | ||
263 | if ((status & TPM_STS_COMMAND_READY) == 0) { | ||
264 | tpm_tis_ready(chip); | ||
265 | if (wait_for_tpm_stat | ||
266 | (chip, TPM_STS_COMMAND_READY, chip->timeout_b, | ||
267 | &priv->int_queue, false) < 0) { | ||
268 | rc = -ETIME; | ||
269 | goto out_err; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | while (count < len - 1) { | ||
274 | burstcnt = min_t(int, get_burstcount(chip), len - count - 1); | ||
275 | rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality), | ||
276 | burstcnt, buf + count); | ||
277 | if (rc < 0) | ||
278 | goto out_err; | ||
279 | |||
280 | count += burstcnt; | ||
281 | |||
282 | wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, | ||
283 | &priv->int_queue, false); | ||
284 | status = tpm_tis_status(chip); | ||
285 | if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { | ||
286 | rc = -EIO; | ||
287 | goto out_err; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | /* write last byte */ | ||
292 | rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]); | ||
293 | if (rc < 0) | ||
294 | goto out_err; | ||
295 | |||
296 | wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, | ||
297 | &priv->int_queue, false); | ||
298 | status = tpm_tis_status(chip); | ||
299 | if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) { | ||
300 | rc = -EIO; | ||
301 | goto out_err; | ||
302 | } | ||
303 | |||
304 | return 0; | ||
305 | |||
306 | out_err: | ||
307 | tpm_tis_ready(chip); | ||
308 | release_locality(chip, priv->locality, 0); | ||
309 | return rc; | ||
310 | } | ||
311 | |||
312 | static void disable_interrupts(struct tpm_chip *chip) | ||
313 | { | ||
314 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
315 | u32 intmask; | ||
316 | int rc; | ||
317 | |||
318 | rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); | ||
319 | if (rc < 0) | ||
320 | intmask = 0; | ||
321 | |||
322 | intmask &= ~TPM_GLOBAL_INT_ENABLE; | ||
323 | rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); | ||
324 | |||
325 | devm_free_irq(chip->dev.parent, priv->irq, chip); | ||
326 | priv->irq = 0; | ||
327 | chip->flags &= ~TPM_CHIP_FLAG_IRQ; | ||
328 | } | ||
329 | |||
330 | /* | ||
331 | * If interrupts are used (signaled by an irq set in the vendor structure) | ||
332 | * tpm.c can skip polling for the data to be available as the interrupt is | ||
333 | * waited for here | ||
334 | */ | ||
335 | static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) | ||
336 | { | ||
337 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
338 | int rc; | ||
339 | u32 ordinal; | ||
340 | unsigned long dur; | ||
341 | |||
342 | rc = tpm_tis_send_data(chip, buf, len); | ||
343 | if (rc < 0) | ||
344 | return rc; | ||
345 | |||
346 | /* go and do it */ | ||
347 | rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO); | ||
348 | if (rc < 0) | ||
349 | goto out_err; | ||
350 | |||
351 | if (chip->flags & TPM_CHIP_FLAG_IRQ) { | ||
352 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); | ||
353 | |||
354 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
355 | dur = tpm2_calc_ordinal_duration(chip, ordinal); | ||
356 | else | ||
357 | dur = tpm_calc_ordinal_duration(chip, ordinal); | ||
358 | |||
359 | if (wait_for_tpm_stat | ||
360 | (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, | ||
361 | &priv->read_queue, false) < 0) { | ||
362 | rc = -ETIME; | ||
363 | goto out_err; | ||
364 | } | ||
365 | } | ||
366 | return len; | ||
367 | out_err: | ||
368 | tpm_tis_ready(chip); | ||
369 | release_locality(chip, priv->locality, 0); | ||
370 | return rc; | ||
371 | } | ||
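For reference, the ordinal fetched from buf + 6 above follows the command header layout shared by TPM 1.2 and 2.0: a 2-byte tag, a 4-byte big-endian total length, then the 4-byte command ordinal. A hedged sketch of that layout (the struct name is illustrative and not part of this patch):

/* Illustrative only: the header shape behind the be32_to_cpu(buf + 6) above */
struct tpm_cmd_header {
	__be16 tag;	/* e.g. TPM_TAG_RQU_COMMAND (1.2) or TPM2_ST_NO_SESSIONS */
	__be32 length;	/* total command size, including this header */
	__be32 ordinal;	/* command code, hence byte offset 6 in the buffer */
} __packed;

static inline u32 tpm_cmd_ordinal(const u8 *buf)
{
	return be32_to_cpu(((const struct tpm_cmd_header *)buf)->ordinal);
}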
372 | |||
373 | static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) | ||
374 | { | ||
375 | int rc, irq; | ||
376 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
377 | |||
378 | if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || priv->irq_tested) | ||
379 | return tpm_tis_send_main(chip, buf, len); | ||
380 | |||
381 | /* Verify receipt of the expected IRQ */ | ||
382 | irq = priv->irq; | ||
383 | priv->irq = 0; | ||
384 | chip->flags &= ~TPM_CHIP_FLAG_IRQ; | ||
385 | rc = tpm_tis_send_main(chip, buf, len); | ||
386 | priv->irq = irq; | ||
387 | chip->flags |= TPM_CHIP_FLAG_IRQ; | ||
388 | if (!priv->irq_tested) | ||
389 | msleep(1); | ||
390 | if (!priv->irq_tested) | ||
391 | disable_interrupts(chip); | ||
392 | priv->irq_tested = true; | ||
393 | return rc; | ||
394 | } | ||
395 | |||
396 | struct tis_vendor_timeout_override { | ||
397 | u32 did_vid; | ||
398 | unsigned long timeout_us[4]; | ||
399 | }; | ||
400 | |||
401 | static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { | ||
402 | /* Atmel 3204 */ | ||
403 | { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), | ||
404 | (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, | ||
405 | }; | ||
406 | |||
407 | static bool tpm_tis_update_timeouts(struct tpm_chip *chip, | ||
408 | unsigned long *timeout_cap) | ||
409 | { | ||
410 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
411 | int i, rc; | ||
412 | u32 did_vid; | ||
413 | |||
414 | rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); | ||
415 | if (rc < 0) | ||
416 | return false; | ||
417 | |||
418 | for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { | ||
419 | if (vendor_timeout_overrides[i].did_vid != did_vid) | ||
420 | continue; | ||
421 | memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, | ||
422 | sizeof(vendor_timeout_overrides[i].timeout_us)); | ||
423 | return true; | ||
424 | } | ||
425 | |||
426 | return false; | ||
427 | } | ||
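The did_vid value matched above packs the device ID in the upper 16 bits and the vendor ID in the lower 16 bits (the probe message later in this file prints vendor >> 16 as the device-id for the same reason), so the Atmel entry 0x32041114 is DID 0x3204 with VID 0x1114. A small illustrative split, not part of the patch:

/* Illustrative: how the DID/VID word used above decomposes */
static inline u16 tis_did(u32 did_vid)
{
	return did_vid >> 16;		/* 0x3204 for the Atmel entry */
}

static inline u16 tis_vid(u32 did_vid)
{
	return did_vid & 0xffff;	/* 0x1114 */
}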
428 | |||
429 | /* | ||
430 | * Early probing for iTPM with STS_DATA_EXPECT flaw. | ||
431 | * Try sending command without itpm flag set and if that | ||
432 | * fails, repeat with itpm flag set. | ||
433 | */ | ||
434 | static int probe_itpm(struct tpm_chip *chip) | ||
435 | { | ||
436 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
437 | int rc = 0; | ||
438 | u8 cmd_getticks[] = { | ||
439 | 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, | ||
440 | 0x00, 0x00, 0x00, 0xf1 | ||
441 | }; | ||
442 | size_t len = sizeof(cmd_getticks); | ||
443 | bool itpm; | ||
444 | u16 vendor; | ||
445 | |||
446 | rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor); | ||
447 | if (rc < 0) | ||
448 | return rc; | ||
449 | |||
450 | /* probe only iTPMs */ | ||
451 | if (vendor != TPM_VID_INTEL) | ||
452 | return 0; | ||
453 | |||
454 | itpm = false; | ||
455 | |||
456 | rc = tpm_tis_send_data(chip, cmd_getticks, len); | ||
457 | if (rc == 0) | ||
458 | goto out; | ||
459 | |||
460 | tpm_tis_ready(chip); | ||
461 | release_locality(chip, priv->locality, 0); | ||
462 | |||
463 | itpm = true; | ||
464 | |||
465 | rc = tpm_tis_send_data(chip, cmd_getticks, len); | ||
466 | if (rc == 0) { | ||
467 | dev_info(&chip->dev, "Detected an iTPM.\n"); | ||
468 | rc = 1; | ||
469 | } else | ||
470 | rc = -EFAULT; | ||
471 | |||
472 | out: | ||
473 | tpm_tis_ready(chip); | ||
474 | release_locality(chip, priv->locality, 0); | ||
475 | |||
476 | return rc; | ||
477 | } | ||
478 | |||
479 | static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) | ||
480 | { | ||
481 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
482 | |||
483 | switch (priv->manufacturer_id) { | ||
484 | case TPM_VID_WINBOND: | ||
485 | return ((status == TPM_STS_VALID) || | ||
486 | (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); | ||
487 | case TPM_VID_STM: | ||
488 | return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); | ||
489 | default: | ||
490 | return (status == TPM_STS_COMMAND_READY); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | static irqreturn_t tis_int_handler(int dummy, void *dev_id) | ||
495 | { | ||
496 | struct tpm_chip *chip = dev_id; | ||
497 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
498 | u32 interrupt; | ||
499 | int i, rc; | ||
500 | |||
501 | rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt); | ||
502 | if (rc < 0) | ||
503 | return IRQ_NONE; | ||
504 | |||
505 | if (interrupt == 0) | ||
506 | return IRQ_NONE; | ||
507 | |||
508 | priv->irq_tested = true; | ||
509 | if (interrupt & TPM_INTF_DATA_AVAIL_INT) | ||
510 | wake_up_interruptible(&priv->read_queue); | ||
511 | if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) | ||
512 | for (i = 0; i < 5; i++) | ||
513 | if (check_locality(chip, i) >= 0) | ||
514 | break; | ||
515 | if (interrupt & | ||
516 | (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | | ||
517 | TPM_INTF_CMD_READY_INT)) | ||
518 | wake_up_interruptible(&priv->int_queue); | ||
519 | |||
520 | /* Clear interrupts handled with TPM_EOI */ | ||
521 | rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt); | ||
522 | if (rc < 0) | ||
523 | return IRQ_NONE; | ||
524 | |||
525 | tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt); | ||
526 | return IRQ_HANDLED; | ||
527 | } | ||
528 | |||
529 | /* Register the IRQ and issue a command that will cause an interrupt. If an | ||
530 | * irq is seen then leave the chip setup for IRQ operation, otherwise reverse | ||
531 | * everything and leave in polling mode. Returns 0 on success. | ||
532 | */ | ||
533 | static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, | ||
534 | int flags, int irq) | ||
535 | { | ||
536 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
537 | u8 original_int_vec; | ||
538 | int rc; | ||
539 | u32 int_status; | ||
540 | |||
541 | if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags, | ||
542 | dev_name(&chip->dev), chip) != 0) { | ||
543 | dev_info(&chip->dev, "Unable to request irq: %d for probe\n", | ||
544 | irq); | ||
545 | return -1; | ||
546 | } | ||
547 | priv->irq = irq; | ||
548 | |||
549 | rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), | ||
550 | &original_int_vec); | ||
551 | if (rc < 0) | ||
552 | return rc; | ||
553 | |||
554 | rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq); | ||
555 | if (rc < 0) | ||
556 | return rc; | ||
557 | |||
558 | rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status); | ||
559 | if (rc < 0) | ||
560 | return rc; | ||
561 | |||
562 | /* Clear all existing */ | ||
563 | rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status); | ||
564 | if (rc < 0) | ||
565 | return rc; | ||
566 | |||
567 | /* Turn on */ | ||
568 | rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), | ||
569 | intmask | TPM_GLOBAL_INT_ENABLE); | ||
570 | if (rc < 0) | ||
571 | return rc; | ||
572 | |||
573 | priv->irq_tested = false; | ||
574 | |||
575 | /* Generate an interrupt by having the core call through to | ||
576 | * tpm_tis_send | ||
577 | */ | ||
578 | if (chip->flags & TPM_CHIP_FLAG_TPM2) | ||
579 | tpm2_gen_interrupt(chip); | ||
580 | else | ||
581 | tpm_gen_interrupt(chip); | ||
582 | |||
583 | /* tpm_tis_send will either confirm the interrupt is working or it | ||
584 | * will call disable_irq which undoes all of the above. | ||
585 | */ | ||
586 | if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { | ||
587 | rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), | ||
588 | original_int_vec); | ||
589 | if (rc < 0) | ||
590 | return rc; | ||
591 | |||
592 | return 1; | ||
593 | } | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /* Try to find the IRQ the TPM is using. This is for legacy x86 systems that | ||
599 | * do not have ACPI/etc. We typically expect the interrupt to be declared if | ||
600 | * present. | ||
601 | */ | ||
602 | static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask) | ||
603 | { | ||
604 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
605 | u8 original_int_vec; | ||
606 | int i, rc; | ||
607 | |||
608 | rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), | ||
609 | &original_int_vec); | ||
610 | if (rc < 0) | ||
611 | return; | ||
612 | |||
613 | if (!original_int_vec) { | ||
614 | if (IS_ENABLED(CONFIG_X86)) | ||
615 | for (i = 3; i <= 15; i++) | ||
616 | if (!tpm_tis_probe_irq_single(chip, intmask, 0, | ||
617 | i)) | ||
618 | return; | ||
619 | } else if (!tpm_tis_probe_irq_single(chip, intmask, 0, | ||
620 | original_int_vec)) | ||
621 | return; | ||
622 | } | ||
623 | |||
624 | void tpm_tis_remove(struct tpm_chip *chip) | ||
625 | { | ||
626 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
627 | u32 reg = TPM_INT_ENABLE(priv->locality); | ||
628 | u32 interrupt; | ||
629 | int rc; | ||
630 | |||
631 | rc = tpm_tis_read32(priv, reg, &interrupt); | ||
632 | if (rc < 0) | ||
633 | interrupt = 0; | ||
634 | |||
635 | tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); | ||
636 | release_locality(chip, priv->locality, 1); | ||
637 | } | ||
638 | EXPORT_SYMBOL_GPL(tpm_tis_remove); | ||
639 | |||
640 | static const struct tpm_class_ops tpm_tis = { | ||
641 | .flags = TPM_OPS_AUTO_STARTUP, | ||
642 | .status = tpm_tis_status, | ||
643 | .recv = tpm_tis_recv, | ||
644 | .send = tpm_tis_send, | ||
645 | .cancel = tpm_tis_ready, | ||
646 | .update_timeouts = tpm_tis_update_timeouts, | ||
647 | .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, | ||
648 | .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, | ||
649 | .req_canceled = tpm_tis_req_canceled, | ||
650 | }; | ||
651 | |||
652 | int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, | ||
653 | const struct tpm_tis_phy_ops *phy_ops, | ||
654 | acpi_handle acpi_dev_handle) | ||
655 | { | ||
656 | u32 vendor, intfcaps, intmask; | ||
657 | u8 rid; | ||
658 | int rc, probe; | ||
659 | struct tpm_chip *chip; | ||
660 | |||
661 | chip = tpmm_chip_alloc(dev, &tpm_tis); | ||
662 | if (IS_ERR(chip)) | ||
663 | return PTR_ERR(chip); | ||
664 | |||
665 | #ifdef CONFIG_ACPI | ||
666 | chip->acpi_dev_handle = acpi_dev_handle; | ||
667 | #endif | ||
668 | |||
669 | /* Maximum timeouts */ | ||
670 | chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX); | ||
671 | chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX); | ||
672 | chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX); | ||
673 | chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX); | ||
674 | priv->phy_ops = phy_ops; | ||
675 | dev_set_drvdata(&chip->dev, priv); | ||
676 | |||
677 | if (wait_startup(chip, 0) != 0) { | ||
678 | rc = -ENODEV; | ||
679 | goto out_err; | ||
680 | } | ||
681 | |||
682 | /* Take control of the TPM's interrupt hardware and shut it off */ | ||
683 | rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); | ||
684 | if (rc < 0) | ||
685 | goto out_err; | ||
686 | |||
687 | intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | | ||
688 | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; | ||
689 | intmask &= ~TPM_GLOBAL_INT_ENABLE; | ||
690 | tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); | ||
691 | |||
692 | if (request_locality(chip, 0) != 0) { | ||
693 | rc = -ENODEV; | ||
694 | goto out_err; | ||
695 | } | ||
696 | |||
697 | rc = tpm2_probe(chip); | ||
698 | if (rc) | ||
699 | goto out_err; | ||
700 | |||
701 | rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); | ||
702 | if (rc < 0) | ||
703 | goto out_err; | ||
704 | |||
705 | priv->manufacturer_id = vendor; | ||
706 | |||
707 | rc = tpm_tis_read8(priv, TPM_RID(0), &rid); | ||
708 | if (rc < 0) | ||
709 | goto out_err; | ||
710 | |||
711 | dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", | ||
712 | (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2", | ||
713 | vendor >> 16, rid); | ||
714 | |||
715 | if (!(priv->flags & TPM_TIS_ITPM_POSSIBLE)) { | ||
716 | probe = probe_itpm(chip); | ||
717 | if (probe < 0) { | ||
718 | rc = -ENODEV; | ||
719 | goto out_err; | ||
720 | } | ||
721 | |||
722 | if (!!probe) | ||
723 | priv->flags |= TPM_TIS_ITPM_POSSIBLE; | ||
724 | } | ||
725 | |||
726 | /* Figure out the capabilities */ | ||
727 | rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps); | ||
728 | if (rc < 0) | ||
729 | goto out_err; | ||
730 | |||
731 | dev_dbg(dev, "TPM interface capabilities (0x%x):\n", | ||
732 | intfcaps); | ||
733 | if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) | ||
734 | dev_dbg(dev, "\tBurst Count Static\n"); | ||
735 | if (intfcaps & TPM_INTF_CMD_READY_INT) | ||
736 | dev_dbg(dev, "\tCommand Ready Int Support\n"); | ||
737 | if (intfcaps & TPM_INTF_INT_EDGE_FALLING) | ||
738 | dev_dbg(dev, "\tInterrupt Edge Falling\n"); | ||
739 | if (intfcaps & TPM_INTF_INT_EDGE_RISING) | ||
740 | dev_dbg(dev, "\tInterrupt Edge Rising\n"); | ||
741 | if (intfcaps & TPM_INTF_INT_LEVEL_LOW) | ||
742 | dev_dbg(dev, "\tInterrupt Level Low\n"); | ||
743 | if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) | ||
744 | dev_dbg(dev, "\tInterrupt Level High\n"); | ||
745 | if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) | ||
746 | dev_dbg(dev, "\tLocality Change Int Support\n"); | ||
747 | if (intfcaps & TPM_INTF_STS_VALID_INT) | ||
748 | dev_dbg(dev, "\tSts Valid Int Support\n"); | ||
749 | if (intfcaps & TPM_INTF_DATA_AVAIL_INT) | ||
750 | dev_dbg(dev, "\tData Avail Int Support\n"); | ||
751 | |||
752 | /* Very early on issue a command to the TPM in polling mode to make | ||
753 | * sure it works. May as well use that command to set the proper | ||
754 | * timeouts for the driver. | ||
755 | */ | ||
756 | if (tpm_get_timeouts(chip)) { | ||
757 | dev_err(dev, "Could not get TPM timeouts and durations\n"); | ||
758 | rc = -ENODEV; | ||
759 | goto out_err; | ||
760 | } | ||
761 | |||
762 | /* INTERRUPT Setup */ | ||
763 | init_waitqueue_head(&priv->read_queue); | ||
764 | init_waitqueue_head(&priv->int_queue); | ||
765 | if (irq != -1) { | ||
766 | if (irq) { | ||
767 | tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, | ||
768 | irq); | ||
769 | if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) | ||
770 | dev_err(&chip->dev, FW_BUG | ||
771 | "TPM interrupt not working, polling instead\n"); | ||
772 | } else { | ||
773 | tpm_tis_probe_irq(chip, intmask); | ||
774 | } | ||
775 | } | ||
776 | |||
777 | return tpm_chip_register(chip); | ||
778 | out_err: | ||
779 | tpm_tis_remove(chip); | ||
780 | return rc; | ||
781 | } | ||
782 | EXPORT_SYMBOL_GPL(tpm_tis_core_init); | ||
783 | |||
784 | #ifdef CONFIG_PM_SLEEP | ||
785 | static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) | ||
786 | { | ||
787 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | ||
788 | u32 intmask; | ||
789 | int rc; | ||
790 | |||
791 | /* reenable interrupts that device may have lost or | ||
792 | * BIOS/firmware may have disabled | ||
793 | */ | ||
794 | rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); | ||
795 | if (rc < 0) | ||
796 | return; | ||
797 | |||
798 | rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); | ||
799 | if (rc < 0) | ||
800 | return; | ||
801 | |||
802 | intmask |= TPM_INTF_CMD_READY_INT | ||
803 | | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | ||
804 | | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; | ||
805 | |||
806 | tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); | ||
807 | } | ||
808 | |||
809 | int tpm_tis_resume(struct device *dev) | ||
810 | { | ||
811 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
812 | int ret; | ||
813 | |||
814 | if (chip->flags & TPM_CHIP_FLAG_IRQ) | ||
815 | tpm_tis_reenable_interrupts(chip); | ||
816 | |||
817 | ret = tpm_pm_resume(dev); | ||
818 | if (ret) | ||
819 | return ret; | ||
820 | |||
821 | /* TPM 1.2 requires self-test on resume. This function actually returns | ||
822 | * an error code but for an unknown reason it isn't handled. | ||
823 | */ | ||
824 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) | ||
825 | tpm_do_selftest(chip); | ||
826 | |||
827 | return 0; | ||
828 | } | ||
829 | EXPORT_SYMBOL_GPL(tpm_tis_resume); | ||
830 | #endif | ||
831 | |||
832 | MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); | ||
833 | MODULE_DESCRIPTION("TPM Driver"); | ||
834 | MODULE_VERSION("2.0"); | ||
835 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h new file mode 100644 index 000000000000..9191aabbf9c2 --- /dev/null +++ b/drivers/char/tpm/tpm_tis_core.h | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005, 2006 IBM Corporation | ||
3 | * Copyright (C) 2014, 2015 Intel Corporation | ||
4 | * | ||
5 | * Authors: | ||
6 | * Leendert van Doorn <leendert@watson.ibm.com> | ||
7 | * Kylene Hall <kjhall@us.ibm.com> | ||
8 | * | ||
9 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> | ||
10 | * | ||
11 | * Device driver for TCG/TCPA TPM (trusted platform module). | ||
12 | * Specifications at www.trustedcomputinggroup.org | ||
13 | * | ||
14 | * This device driver implements the TPM interface as defined in | ||
15 | * the TCG TPM Interface Spec version 1.2, revision 1.0. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License as | ||
19 | * published by the Free Software Foundation, version 2 of the | ||
20 | * License. | ||
21 | */ | ||
22 | |||
23 | #ifndef __TPM_TIS_CORE_H__ | ||
24 | #define __TPM_TIS_CORE_H__ | ||
25 | |||
26 | #include "tpm.h" | ||
27 | |||
28 | enum tis_access { | ||
29 | TPM_ACCESS_VALID = 0x80, | ||
30 | TPM_ACCESS_ACTIVE_LOCALITY = 0x20, | ||
31 | TPM_ACCESS_REQUEST_PENDING = 0x04, | ||
32 | TPM_ACCESS_REQUEST_USE = 0x02, | ||
33 | }; | ||
34 | |||
35 | enum tis_status { | ||
36 | TPM_STS_VALID = 0x80, | ||
37 | TPM_STS_COMMAND_READY = 0x40, | ||
38 | TPM_STS_GO = 0x20, | ||
39 | TPM_STS_DATA_AVAIL = 0x10, | ||
40 | TPM_STS_DATA_EXPECT = 0x08, | ||
41 | }; | ||
42 | |||
43 | enum tis_int_flags { | ||
44 | TPM_GLOBAL_INT_ENABLE = 0x80000000, | ||
45 | TPM_INTF_BURST_COUNT_STATIC = 0x100, | ||
46 | TPM_INTF_CMD_READY_INT = 0x080, | ||
47 | TPM_INTF_INT_EDGE_FALLING = 0x040, | ||
48 | TPM_INTF_INT_EDGE_RISING = 0x020, | ||
49 | TPM_INTF_INT_LEVEL_LOW = 0x010, | ||
50 | TPM_INTF_INT_LEVEL_HIGH = 0x008, | ||
51 | TPM_INTF_LOCALITY_CHANGE_INT = 0x004, | ||
52 | TPM_INTF_STS_VALID_INT = 0x002, | ||
53 | TPM_INTF_DATA_AVAIL_INT = 0x001, | ||
54 | }; | ||
55 | |||
56 | enum tis_defaults { | ||
57 | TIS_MEM_LEN = 0x5000, | ||
58 | TIS_SHORT_TIMEOUT = 750, /* ms */ | ||
59 | TIS_LONG_TIMEOUT = 2000, /* 2 sec */ | ||
60 | }; | ||
61 | |||
62 | /* Some timeout values are needed before it is known whether the chip is | ||
63 | * TPM 1.0 or TPM 2.0. | ||
64 | */ | ||
65 | #define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A) | ||
66 | #define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B) | ||
67 | #define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C) | ||
68 | #define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D) | ||
69 | |||
70 | #define TPM_ACCESS(l) (0x0000 | ((l) << 12)) | ||
71 | #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) | ||
72 | #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) | ||
73 | #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) | ||
74 | #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) | ||
75 | #define TPM_STS(l) (0x0018 | ((l) << 12)) | ||
76 | #define TPM_STS3(l) (0x001b | ((l) << 12)) | ||
77 | #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) | ||
78 | |||
79 | #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) | ||
80 | #define TPM_RID(l) (0x0F04 | ((l) << 12)) | ||
81 | |||
82 | enum tpm_tis_flags { | ||
83 | TPM_TIS_ITPM_POSSIBLE = BIT(0), | ||
84 | }; | ||
85 | |||
86 | struct tpm_tis_data { | ||
87 | u16 manufacturer_id; | ||
88 | int locality; | ||
89 | int irq; | ||
90 | bool irq_tested; | ||
91 | unsigned int flags; | ||
92 | wait_queue_head_t int_queue; | ||
93 | wait_queue_head_t read_queue; | ||
94 | const struct tpm_tis_phy_ops *phy_ops; | ||
95 | }; | ||
96 | |||
97 | struct tpm_tis_phy_ops { | ||
98 | int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, | ||
99 | u8 *result); | ||
100 | int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, | ||
101 | u8 *value); | ||
102 | int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result); | ||
103 | int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result); | ||
104 | int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src); | ||
105 | }; | ||
106 | |||
107 | static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr, | ||
108 | u16 len, u8 *result) | ||
109 | { | ||
110 | return data->phy_ops->read_bytes(data, addr, len, result); | ||
111 | } | ||
112 | |||
113 | static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result) | ||
114 | { | ||
115 | return data->phy_ops->read_bytes(data, addr, 1, result); | ||
116 | } | ||
117 | |||
118 | static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr, | ||
119 | u16 *result) | ||
120 | { | ||
121 | return data->phy_ops->read16(data, addr, result); | ||
122 | } | ||
123 | |||
124 | static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr, | ||
125 | u32 *result) | ||
126 | { | ||
127 | return data->phy_ops->read32(data, addr, result); | ||
128 | } | ||
129 | |||
130 | static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr, | ||
131 | u16 len, u8 *value) | ||
132 | { | ||
133 | return data->phy_ops->write_bytes(data, addr, len, value); | ||
134 | } | ||
135 | |||
136 | static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value) | ||
137 | { | ||
138 | return data->phy_ops->write_bytes(data, addr, 1, &value); | ||
139 | } | ||
140 | |||
141 | static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr, | ||
142 | u32 value) | ||
143 | { | ||
144 | return data->phy_ops->write32(data, addr, value); | ||
145 | } | ||
146 | |||
147 | void tpm_tis_remove(struct tpm_chip *chip); | ||
148 | int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, | ||
149 | const struct tpm_tis_phy_ops *phy_ops, | ||
150 | acpi_handle acpi_dev_handle); | ||
151 | |||
152 | #ifdef CONFIG_PM_SLEEP | ||
153 | int tpm_tis_resume(struct device *dev); | ||
154 | #endif | ||
155 | |||
156 | #endif | ||
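A bus front end only has to supply the five phy_ops accessors above, plus an embedded struct tpm_tis_data, before calling tpm_tis_core_init(), exactly as the SPI driver in the next file does. As a hedged illustration (the names are invented for this sketch and the memory-mapped backend is hypothetical, not a driver in this series), a simple MMIO phy could look roughly like this:

/* Hypothetical MMIO-backed phy; illustrative sketch only */
struct tpm_tis_mmio_phy {
	struct tpm_tis_data priv;	/* embedded so container_of() recovers the phy */
	void __iomem *iobase;		/* mapped TIS register window */
};

static inline struct tpm_tis_mmio_phy *to_mmio_phy(struct tpm_tis_data *data)
{
	return container_of(data, struct tpm_tis_mmio_phy, priv);
}

static int mmio_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result)
{
	void __iomem *base = to_mmio_phy(data)->iobase;

	while (len--)			/* FIFO-style: same address read repeatedly */
		*result++ = ioread8(base + addr);
	return 0;
}

static int mmio_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *value)
{
	void __iomem *base = to_mmio_phy(data)->iobase;

	while (len--)
		iowrite8(*value++, base + addr);
	return 0;
}

static int mmio_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
	*result = ioread16(to_mmio_phy(data)->iobase + addr);
	return 0;
}

static int mmio_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
	*result = ioread32(to_mmio_phy(data)->iobase + addr);
	return 0;
}

static int mmio_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
	iowrite32(value, to_mmio_phy(data)->iobase + addr);
	return 0;
}

static const struct tpm_tis_phy_ops mmio_phy_ops = {
	.read_bytes  = mmio_read_bytes,
	.write_bytes = mmio_write_bytes,
	.read16      = mmio_read16,
	.read32      = mmio_read32,
	.write32     = mmio_write32,
};

/* probe() would then map the register window and call:
 *	tpm_tis_core_init(dev, &phy->priv, irq, &mmio_phy_ops, acpi_handle);
 */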
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c new file mode 100644 index 000000000000..dbaad9c681e3 --- /dev/null +++ b/drivers/char/tpm/tpm_tis_spi.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Infineon Technologies AG | ||
3 | * Copyright (C) 2016 STMicroelectronics SAS | ||
4 | * | ||
5 | * Authors: | ||
6 | * Peter Huewe <peter.huewe@infineon.com> | ||
7 | * Christophe Ricard <christophe-h.ricard@st.com> | ||
8 | * | ||
9 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> | ||
10 | * | ||
11 | * Device driver for TCG/TCPA TPM (trusted platform module). | ||
12 | * Specifications at www.trustedcomputinggroup.org | ||
13 | * | ||
14 | * This device driver implements the TPM interface as defined in | ||
15 | * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native | ||
16 | * SPI access_. | ||
17 | * | ||
18 | * It is based on the original tpm_tis device driver from Leendert van | ||
19 | * Doorn, Kylene Hall and Jarkko Sakkinen. | ||
20 | * | ||
21 | * This program is free software; you can redistribute it and/or | ||
22 | * modify it under the terms of the GNU General Public License as | ||
23 | * published by the Free Software Foundation, version 2 of the | ||
24 | * License. | ||
25 | */ | ||
26 | |||
27 | #include <linux/init.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/moduleparam.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/acpi.h> | ||
34 | #include <linux/freezer.h> | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/spi/spi.h> | ||
38 | #include <linux/gpio.h> | ||
39 | #include <linux/of_irq.h> | ||
40 | #include <linux/of_gpio.h> | ||
41 | #include <linux/tpm.h> | ||
42 | #include "tpm.h" | ||
43 | #include "tpm_tis_core.h" | ||
44 | |||
45 | #define MAX_SPI_FRAMESIZE 64 | ||
46 | |||
47 | struct tpm_tis_spi_phy { | ||
48 | struct tpm_tis_data priv; | ||
49 | struct spi_device *spi_device; | ||
50 | |||
51 | u8 tx_buf[MAX_SPI_FRAMESIZE + 4]; | ||
52 | u8 rx_buf[MAX_SPI_FRAMESIZE + 4]; | ||
53 | }; | ||
54 | |||
55 | static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data) | ||
56 | { | ||
57 | return container_of(data, struct tpm_tis_spi_phy, priv); | ||
58 | } | ||
59 | |||
60 | static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, | ||
61 | u16 len, u8 *result) | ||
62 | { | ||
63 | struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); | ||
64 | int ret, i; | ||
65 | struct spi_message m; | ||
66 | struct spi_transfer spi_xfer = { | ||
67 | .tx_buf = phy->tx_buf, | ||
68 | .rx_buf = phy->rx_buf, | ||
69 | .len = 4, | ||
70 | }; | ||
71 | |||
72 | if (len > MAX_SPI_FRAMESIZE) | ||
73 | return -ENOMEM; | ||
74 | |||
75 | phy->tx_buf[0] = 0x80 | (len - 1); | ||
76 | phy->tx_buf[1] = 0xd4; | ||
77 | phy->tx_buf[2] = (addr >> 8) & 0xFF; | ||
78 | phy->tx_buf[3] = addr & 0xFF; | ||
79 | |||
80 | spi_xfer.cs_change = 1; | ||
81 | spi_message_init(&m); | ||
82 | spi_message_add_tail(&spi_xfer, &m); | ||
83 | |||
84 | spi_bus_lock(phy->spi_device->master); | ||
85 | ret = spi_sync_locked(phy->spi_device, &m); | ||
86 | if (ret < 0) | ||
87 | goto exit; | ||
88 | |||
89 | memset(phy->tx_buf, 0, len); | ||
90 | |||
91 | /* According to TCG PTP specification, if there is no TPM present at | ||
92 | * all, then the design has a weak pull-up on MISO. If a TPM is not | ||
93 | * present, a pull-up on MISO means that the SB controller sees a 1, | ||
94 | * and will latch in 0xFF on the read. | ||
95 | */ | ||
96 | for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) { | ||
97 | spi_xfer.len = 1; | ||
98 | spi_message_init(&m); | ||
99 | spi_message_add_tail(&spi_xfer, &m); | ||
100 | ret = spi_sync_locked(phy->spi_device, &m); | ||
101 | if (ret < 0) | ||
102 | goto exit; | ||
103 | } | ||
104 | |||
105 | spi_xfer.cs_change = 0; | ||
106 | spi_xfer.len = len; | ||
107 | spi_xfer.rx_buf = result; | ||
108 | |||
109 | spi_message_init(&m); | ||
110 | spi_message_add_tail(&spi_xfer, &m); | ||
111 | ret = spi_sync_locked(phy->spi_device, &m); | ||
112 | |||
113 | exit: | ||
114 | spi_bus_unlock(phy->spi_device->master); | ||
115 | return ret; | ||
116 | } | ||
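The four header bytes assembled above follow the TIS-over-SPI framing: bit 7 of the first byte selects read (set) or write (clear), its low bits carry the transfer size minus one, and the remaining three bytes are the 24-bit register address, whose top byte is always 0xd4 for the TIS register window. A hedged helper showing the same framing (illustrative, not part of the driver); a 4-byte read of TPM_STS(0) comes out as 83 d4 00 18:

/* Illustrative framing helper matching the header bytes built above */
static void tis_spi_frame_header(u8 hdr[4], bool do_read, u16 tis_addr, u16 len)
{
	hdr[0] = (do_read ? 0x80 : 0x00) | (len - 1);	/* direction + size-1 */
	hdr[1] = 0xd4;					/* TIS register window */
	hdr[2] = (tis_addr >> 8) & 0xff;
	hdr[3] = tis_addr & 0xff;
}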
117 | |||
118 | static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, | ||
119 | u16 len, u8 *value) | ||
120 | { | ||
121 | struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); | ||
122 | int ret, i; | ||
123 | struct spi_message m; | ||
124 | struct spi_transfer spi_xfer = { | ||
125 | .tx_buf = phy->tx_buf, | ||
126 | .rx_buf = phy->rx_buf, | ||
127 | .len = 4, | ||
128 | }; | ||
129 | |||
130 | if (len > MAX_SPI_FRAMESIZE) | ||
131 | return -ENOMEM; | ||
132 | |||
133 | phy->tx_buf[0] = len - 1; | ||
134 | phy->tx_buf[1] = 0xd4; | ||
135 | phy->tx_buf[2] = (addr >> 8) & 0xFF; | ||
136 | phy->tx_buf[3] = addr & 0xFF; | ||
137 | |||
138 | spi_xfer.cs_change = 1; | ||
139 | spi_message_init(&m); | ||
140 | spi_message_add_tail(&spi_xfer, &m); | ||
141 | |||
142 | spi_bus_lock(phy->spi_device->master); | ||
143 | ret = spi_sync_locked(phy->spi_device, &m); | ||
144 | if (ret < 0) | ||
145 | goto exit; | ||
146 | |||
147 | memset(phy->tx_buf, 0, len); | ||
148 | |||
149 | /* According to TCG PTP specification, if there is no TPM present at | ||
150 | * all, then the design has a weak pull-up on MISO. If a TPM is not | ||
151 | * present, a pull-up on MISO means that the SB controller sees a 1, | ||
152 | * and will latch in 0xFF on the read. | ||
153 | */ | ||
154 | for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) { | ||
155 | spi_xfer.len = 1; | ||
156 | spi_message_init(&m); | ||
157 | spi_message_add_tail(&spi_xfer, &m); | ||
158 | ret = spi_sync_locked(phy->spi_device, &m); | ||
159 | if (ret < 0) | ||
160 | goto exit; | ||
161 | } | ||
162 | |||
163 | spi_xfer.len = len; | ||
164 | spi_xfer.tx_buf = value; | ||
165 | spi_xfer.cs_change = 0; | ||
166 |||
167 | spi_message_init(&m); | ||
168 | spi_message_add_tail(&spi_xfer, &m); | ||
169 | ret = spi_sync_locked(phy->spi_device, &m); | ||
170 | |||
171 | exit: | ||
172 | spi_bus_unlock(phy->spi_device->master); | ||
173 | return ret; | ||
174 | } | ||
175 | |||
176 | static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) | ||
177 | { | ||
178 | int rc; | ||
179 | |||
180 | rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result); | ||
181 | if (!rc) | ||
182 | *result = le16_to_cpu(*result); | ||
183 | return rc; | ||
184 | } | ||
185 | |||
186 | static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result) | ||
187 | { | ||
188 | int rc; | ||
189 | |||
190 | rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result); | ||
191 | if (!rc) | ||
192 | *result = le32_to_cpu(*result); | ||
193 | return rc; | ||
194 | } | ||
195 | |||
196 | static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value) | ||
197 | { | ||
198 | value = cpu_to_le32(value); | ||
199 | return data->phy_ops->write_bytes(data, addr, sizeof(u32), | ||
200 | (u8 *)&value); | ||
201 | } | ||
202 | |||
203 | static const struct tpm_tis_phy_ops tpm_spi_phy_ops = { | ||
204 | .read_bytes = tpm_tis_spi_read_bytes, | ||
205 | .write_bytes = tpm_tis_spi_write_bytes, | ||
206 | .read16 = tpm_tis_spi_read16, | ||
207 | .read32 = tpm_tis_spi_read32, | ||
208 | .write32 = tpm_tis_spi_write32, | ||
209 | }; | ||
210 | |||
211 | static int tpm_tis_spi_probe(struct spi_device *dev) | ||
212 | { | ||
213 | struct tpm_tis_spi_phy *phy; | ||
214 | |||
215 | phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy), | ||
216 | GFP_KERNEL); | ||
217 | if (!phy) | ||
218 | return -ENOMEM; | ||
219 | |||
220 | phy->spi_device = dev; | ||
221 | |||
222 | return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops, | ||
223 | NULL); | ||
224 | } | ||
225 | |||
226 | static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); | ||
227 | |||
228 | static int tpm_tis_spi_remove(struct spi_device *dev) | ||
229 | { | ||
230 | struct tpm_chip *chip = spi_get_drvdata(dev); | ||
231 | |||
232 | tpm_chip_unregister(chip); | ||
233 | tpm_tis_remove(chip); | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static const struct spi_device_id tpm_tis_spi_id[] = { | ||
238 | {"tpm_tis_spi", 0}, | ||
239 | {} | ||
240 | }; | ||
241 | MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id); | ||
242 | |||
243 | static const struct of_device_id of_tis_spi_match[] = { | ||
244 | { .compatible = "st,st33htpm-spi", }, | ||
245 | { .compatible = "infineon,slb9670", }, | ||
246 | { .compatible = "tcg,tpm_tis-spi", }, | ||
247 | {} | ||
248 | }; | ||
249 | MODULE_DEVICE_TABLE(of, of_tis_spi_match); | ||
250 | |||
251 | static const struct acpi_device_id acpi_tis_spi_match[] = { | ||
252 | {"SMO0768", 0}, | ||
253 | {} | ||
254 | }; | ||
255 | MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match); | ||
256 | |||
257 | static struct spi_driver tpm_tis_spi_driver = { | ||
258 | .driver = { | ||
259 | .owner = THIS_MODULE, | ||
260 | .name = "tpm_tis_spi", | ||
261 | .pm = &tpm_tis_pm, | ||
262 | .of_match_table = of_match_ptr(of_tis_spi_match), | ||
263 | .acpi_match_table = ACPI_PTR(acpi_tis_spi_match), | ||
264 | }, | ||
265 | .probe = tpm_tis_spi_probe, | ||
266 | .remove = tpm_tis_spi_remove, | ||
267 | .id_table = tpm_tis_spi_id, | ||
268 | }; | ||
269 | module_spi_driver(tpm_tis_spi_driver); | ||
270 | |||
271 | MODULE_DESCRIPTION("TPM Driver for native SPI access"); | ||
272 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c new file mode 100644 index 000000000000..9a940332c157 --- /dev/null +++ b/drivers/char/tpm/tpm_vtpm_proxy.c | |||
@@ -0,0 +1,637 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015, 2016 IBM Corporation | ||
3 | * | ||
4 | * Author: Stefan Berger <stefanb@us.ibm.com> | ||
5 | * | ||
6 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> | ||
7 | * | ||
8 | * Device driver for vTPM (vTPM proxy driver) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License as | ||
12 | * published by the Free Software Foundation, version 2 of the | ||
13 | * License. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/uaccess.h> | ||
20 | #include <linux/wait.h> | ||
21 | #include <linux/miscdevice.h> | ||
22 | #include <linux/vtpm_proxy.h> | ||
23 | #include <linux/file.h> | ||
24 | #include <linux/anon_inodes.h> | ||
25 | #include <linux/poll.h> | ||
26 | #include <linux/compat.h> | ||
27 | |||
28 | #include "tpm.h" | ||
29 | |||
30 | #define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0) | ||
31 | |||
32 | struct proxy_dev { | ||
33 | struct tpm_chip *chip; | ||
34 | |||
35 | u32 flags; /* public API flags */ | ||
36 | |||
37 | wait_queue_head_t wq; | ||
38 | |||
39 | struct mutex buf_lock; /* protect buffer and flags */ | ||
40 | |||
41 | long state; /* internal state */ | ||
42 | #define STATE_OPENED_FLAG BIT(0) | ||
43 | #define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */ | ||
44 | |||
45 | size_t req_len; /* length of queued TPM request */ | ||
46 | size_t resp_len; /* length of queued TPM response */ | ||
47 | u8 buffer[TPM_BUFSIZE]; /* request/response buffer */ | ||
48 | |||
49 | struct work_struct work; /* task that retrieves TPM timeouts */ | ||
50 | }; | ||
51 | |||
52 | /* all supported flags */ | ||
53 | #define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2) | ||
54 | |||
55 | static struct workqueue_struct *workqueue; | ||
56 | |||
57 | static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev); | ||
58 | |||
59 | /* | ||
60 | * Functions related to 'server side' | ||
61 | */ | ||
62 | |||
63 | /** | ||
64 | * vtpm_proxy_fops_read - Read TPM commands on 'server side' | ||
65 | * | ||
66 | * Return value: | ||
67 | * Number of bytes read or negative error code | ||
68 | */ | ||
69 | static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf, | ||
70 | size_t count, loff_t *off) | ||
71 | { | ||
72 | struct proxy_dev *proxy_dev = filp->private_data; | ||
73 | size_t len; | ||
74 | int sig, rc; | ||
75 | |||
76 | sig = wait_event_interruptible(proxy_dev->wq, | ||
77 | proxy_dev->req_len != 0 || | ||
78 | !(proxy_dev->state & STATE_OPENED_FLAG)); | ||
79 | if (sig) | ||
80 | return -EINTR; | ||
81 | |||
82 | mutex_lock(&proxy_dev->buf_lock); | ||
83 | |||
84 | if (!(proxy_dev->state & STATE_OPENED_FLAG)) { | ||
85 | mutex_unlock(&proxy_dev->buf_lock); | ||
86 | return -EPIPE; | ||
87 | } | ||
88 | |||
89 | len = proxy_dev->req_len; | ||
90 | |||
91 | if (count < len) { | ||
92 | mutex_unlock(&proxy_dev->buf_lock); | ||
93 | pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n", | ||
94 | count, len); | ||
95 | return -EIO; | ||
96 | } | ||
97 | |||
98 | rc = copy_to_user(buf, proxy_dev->buffer, len); | ||
99 | memset(proxy_dev->buffer, 0, len); | ||
100 | proxy_dev->req_len = 0; | ||
101 | |||
102 | if (!rc) | ||
103 | proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG; | ||
104 | |||
105 | mutex_unlock(&proxy_dev->buf_lock); | ||
106 | |||
107 | if (rc) | ||
108 | return -EFAULT; | ||
109 | |||
110 | return len; | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * vtpm_proxy_fops_write - Write TPM responses on 'server side' | ||
115 | * | ||
116 | * Return value: | ||
117 | * Number of bytes written or negative error value | ||
118 | */ | ||
119 | static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf, | ||
120 | size_t count, loff_t *off) | ||
121 | { | ||
122 | struct proxy_dev *proxy_dev = filp->private_data; | ||
123 | |||
124 | mutex_lock(&proxy_dev->buf_lock); | ||
125 | |||
126 | if (!(proxy_dev->state & STATE_OPENED_FLAG)) { | ||
127 | mutex_unlock(&proxy_dev->buf_lock); | ||
128 | return -EPIPE; | ||
129 | } | ||
130 | |||
131 | if (count > sizeof(proxy_dev->buffer) || | ||
132 | !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) { | ||
133 | mutex_unlock(&proxy_dev->buf_lock); | ||
134 | return -EIO; | ||
135 | } | ||
136 | |||
137 | proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; | ||
138 | |||
139 | proxy_dev->req_len = 0; | ||
140 | |||
141 | if (copy_from_user(proxy_dev->buffer, buf, count)) { | ||
142 | mutex_unlock(&proxy_dev->buf_lock); | ||
143 | return -EFAULT; | ||
144 | } | ||
145 | |||
146 | proxy_dev->resp_len = count; | ||
147 | |||
148 | mutex_unlock(&proxy_dev->buf_lock); | ||
149 | |||
150 | wake_up_interruptible(&proxy_dev->wq); | ||
151 | |||
152 | return count; | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * vtpm_proxy_fops_poll: Poll status on 'server side' | ||
157 | * | ||
158 | * Return value: | ||
159 | * Poll flags | ||
160 | */ | ||
161 | static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait) | ||
162 | { | ||
163 | struct proxy_dev *proxy_dev = filp->private_data; | ||
164 | unsigned ret; | ||
165 | |||
166 | poll_wait(filp, &proxy_dev->wq, wait); | ||
167 | |||
168 | ret = POLLOUT; | ||
169 | |||
170 | mutex_lock(&proxy_dev->buf_lock); | ||
171 | |||
172 | if (proxy_dev->req_len) | ||
173 | ret |= POLLIN | POLLRDNORM; | ||
174 | |||
175 | if (!(proxy_dev->state & STATE_OPENED_FLAG)) | ||
176 | ret |= POLLHUP; | ||
177 | |||
178 | mutex_unlock(&proxy_dev->buf_lock); | ||
179 | |||
180 | return ret; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * vtpm_proxy_fops_open - Open vTPM device on 'server side' | ||
185 | * | ||
186 | * Called when setting up the anonymous file descriptor | ||
187 | */ | ||
188 | static void vtpm_proxy_fops_open(struct file *filp) | ||
189 | { | ||
190 | struct proxy_dev *proxy_dev = filp->private_data; | ||
191 | |||
192 | proxy_dev->state |= STATE_OPENED_FLAG; | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * vtpm_proxy_fops_undo_open - counterpart to vtpm_proxy_fops_open | ||
197 | * | ||
198 | * Call to undo vtpm_proxy_fops_open | ||
199 | */ | ||
200 | static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev) | ||
201 | { | ||
202 | mutex_lock(&proxy_dev->buf_lock); | ||
203 | |||
204 | proxy_dev->state &= ~STATE_OPENED_FLAG; | ||
205 | |||
206 | mutex_unlock(&proxy_dev->buf_lock); | ||
207 | |||
208 | /* no more TPM responses -- wake up anyone waiting for them */ | ||
209 | wake_up_interruptible(&proxy_dev->wq); | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * vtpm_proxy_fops_release: Close 'server side' | ||
214 | * | ||
215 | * Return value: | ||
216 | * Always returns 0. | ||
217 | */ | ||
218 | static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) | ||
219 | { | ||
220 | struct proxy_dev *proxy_dev = filp->private_data; | ||
221 | |||
222 | filp->private_data = NULL; | ||
223 | |||
224 | vtpm_proxy_delete_device(proxy_dev); | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static const struct file_operations vtpm_proxy_fops = { | ||
230 | .owner = THIS_MODULE, | ||
231 | .llseek = no_llseek, | ||
232 | .read = vtpm_proxy_fops_read, | ||
233 | .write = vtpm_proxy_fops_write, | ||
234 | .poll = vtpm_proxy_fops_poll, | ||
235 | .release = vtpm_proxy_fops_release, | ||
236 | }; | ||
237 | |||
238 | /* | ||
239 | * Functions invoked by the core TPM driver to send TPM commands to | ||
240 | * 'server side' and receive responses from there. | ||
241 | */ | ||
242 | |||
243 | /* | ||
244 | * Called when core TPM driver reads TPM responses from 'server side' | ||
245 | * | ||
246 | * Return value: | ||
247 | * Number of TPM response bytes read, negative error value otherwise | ||
248 | */ | ||
249 | static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) | ||
250 | { | ||
251 | struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); | ||
252 | size_t len; | ||
253 | |||
254 | /* process gone ? */ | ||
255 | mutex_lock(&proxy_dev->buf_lock); | ||
256 | |||
257 | if (!(proxy_dev->state & STATE_OPENED_FLAG)) { | ||
258 | mutex_unlock(&proxy_dev->buf_lock); | ||
259 | return -EPIPE; | ||
260 | } | ||
261 | |||
262 | len = proxy_dev->resp_len; | ||
263 | if (count < len) { | ||
264 | dev_err(&chip->dev, | ||
265 | "Invalid size in recv: count=%zd, resp_len=%zd\n", | ||
266 | count, len); | ||
267 | len = -EIO; | ||
268 | goto out; | ||
269 | } | ||
270 | |||
271 | memcpy(buf, proxy_dev->buffer, len); | ||
272 | proxy_dev->resp_len = 0; | ||
273 | |||
274 | out: | ||
275 | mutex_unlock(&proxy_dev->buf_lock); | ||
276 | |||
277 | return len; | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * Called when core TPM driver forwards TPM requests to 'server side'. | ||
282 | * | ||
283 | * Return value: | ||
284 | * 0 in case of success, negative error value otherwise. | ||
285 | */ | ||
286 | static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) | ||
287 | { | ||
288 | struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); | ||
289 | int rc = 0; | ||
290 | |||
291 | if (count > sizeof(proxy_dev->buffer)) { | ||
292 | dev_err(&chip->dev, | ||
293 | "Invalid size in send: count=%zd, buffer size=%zd\n", | ||
294 | count, sizeof(proxy_dev->buffer)); | ||
295 | return -EIO; | ||
296 | } | ||
297 | |||
298 | mutex_lock(&proxy_dev->buf_lock); | ||
299 | |||
300 | if (!(proxy_dev->state & STATE_OPENED_FLAG)) { | ||
301 | mutex_unlock(&proxy_dev->buf_lock); | ||
302 | return -EPIPE; | ||
303 | } | ||
304 | |||
305 | proxy_dev->resp_len = 0; | ||
306 | |||
307 | proxy_dev->req_len = count; | ||
308 | memcpy(proxy_dev->buffer, buf, count); | ||
309 | |||
310 | proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; | ||
311 | |||
312 | mutex_unlock(&proxy_dev->buf_lock); | ||
313 | |||
314 | wake_up_interruptible(&proxy_dev->wq); | ||
315 | |||
316 | return rc; | ||
317 | } | ||
318 | |||
319 | static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip) | ||
320 | { | ||
321 | /* not supported */ | ||
322 | } | ||
323 | |||
324 | static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip) | ||
325 | { | ||
326 | struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); | ||
327 | |||
328 | if (proxy_dev->resp_len) | ||
329 | return VTPM_PROXY_REQ_COMPLETE_FLAG; | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status) | ||
335 | { | ||
336 | struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); | ||
337 | bool ret; | ||
338 | |||
339 | mutex_lock(&proxy_dev->buf_lock); | ||
340 | |||
341 | ret = !(proxy_dev->state & STATE_OPENED_FLAG); | ||
342 | |||
343 | mutex_unlock(&proxy_dev->buf_lock); | ||
344 | |||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | static const struct tpm_class_ops vtpm_proxy_tpm_ops = { | ||
349 | .flags = TPM_OPS_AUTO_STARTUP, | ||
350 | .recv = vtpm_proxy_tpm_op_recv, | ||
351 | .send = vtpm_proxy_tpm_op_send, | ||
352 | .cancel = vtpm_proxy_tpm_op_cancel, | ||
353 | .status = vtpm_proxy_tpm_op_status, | ||
354 | .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG, | ||
355 | .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG, | ||
356 | .req_canceled = vtpm_proxy_tpm_req_canceled, | ||
357 | }; | ||
358 | |||
359 | /* | ||
360 | * Code related to the startup of TPM 2.0 and TPM 1.2, plus the | ||
361 | * retrieval of timeouts and durations. | ||
362 | */ | ||
363 | |||
364 | static void vtpm_proxy_work(struct work_struct *work) | ||
365 | { | ||
366 | struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev, | ||
367 | work); | ||
368 | int rc; | ||
369 | |||
370 | rc = tpm_chip_register(proxy_dev->chip); | ||
371 | if (rc) | ||
372 | goto err; | ||
373 | |||
374 | return; | ||
375 | |||
376 | err: | ||
377 | vtpm_proxy_fops_undo_open(proxy_dev); | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * vtpm_proxy_work_stop: make sure the work has finished | ||
382 | * | ||
383 | * This function is useful when user space closed the fd | ||
384 | * while the driver still determines timeouts. | ||
385 | */ | ||
386 | static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev) | ||
387 | { | ||
388 | vtpm_proxy_fops_undo_open(proxy_dev); | ||
389 | flush_work(&proxy_dev->work); | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization | ||
394 | */ | ||
395 | static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev) | ||
396 | { | ||
397 | queue_work(workqueue, &proxy_dev->work); | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * Code related to creation and deletion of device pairs | ||
402 | */ | ||
403 | static struct proxy_dev *vtpm_proxy_create_proxy_dev(void) | ||
404 | { | ||
405 | struct proxy_dev *proxy_dev; | ||
406 | struct tpm_chip *chip; | ||
407 | int err; | ||
408 | |||
409 | proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL); | ||
410 | if (proxy_dev == NULL) | ||
411 | return ERR_PTR(-ENOMEM); | ||
412 | |||
413 | init_waitqueue_head(&proxy_dev->wq); | ||
414 | mutex_init(&proxy_dev->buf_lock); | ||
415 | INIT_WORK(&proxy_dev->work, vtpm_proxy_work); | ||
416 | |||
417 | chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops); | ||
418 | if (IS_ERR(chip)) { | ||
419 | err = PTR_ERR(chip); | ||
420 | goto err_proxy_dev_free; | ||
421 | } | ||
422 | dev_set_drvdata(&chip->dev, proxy_dev); | ||
423 | |||
424 | proxy_dev->chip = chip; | ||
425 | |||
426 | return proxy_dev; | ||
427 | |||
428 | err_proxy_dev_free: | ||
429 | kfree(proxy_dev); | ||
430 | |||
431 | return ERR_PTR(err); | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * Undo what has been done in vtpm_proxy_create_proxy_dev | ||
436 | */ | ||
437 | static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev) | ||
438 | { | ||
439 | put_device(&proxy_dev->chip->dev); /* frees chip */ | ||
440 | kfree(proxy_dev); | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * Create a /dev/tpm%d and 'server side' file descriptor pair | ||
445 | * | ||
446 | * Return value: | ||
447 | * Returns file pointer on success, an error value otherwise | ||
448 | */ | ||
449 | static struct file *vtpm_proxy_create_device( | ||
450 | struct vtpm_proxy_new_dev *vtpm_new_dev) | ||
451 | { | ||
452 | struct proxy_dev *proxy_dev; | ||
453 | int rc, fd; | ||
454 | struct file *file; | ||
455 | |||
456 | if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL) | ||
457 | return ERR_PTR(-EOPNOTSUPP); | ||
458 | |||
459 | proxy_dev = vtpm_proxy_create_proxy_dev(); | ||
460 | if (IS_ERR(proxy_dev)) | ||
461 | return ERR_CAST(proxy_dev); | ||
462 | |||
463 | proxy_dev->flags = vtpm_new_dev->flags; | ||
464 | |||
465 | /* setup an anonymous file for the server-side */ | ||
466 | fd = get_unused_fd_flags(O_RDWR); | ||
467 | if (fd < 0) { | ||
468 | rc = fd; | ||
469 | goto err_delete_proxy_dev; | ||
470 | } | ||
471 | |||
472 | file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev, | ||
473 | O_RDWR); | ||
474 | if (IS_ERR(file)) { | ||
475 | rc = PTR_ERR(file); | ||
476 | goto err_put_unused_fd; | ||
477 | } | ||
478 | |||
479 | /* from now on we can unwind with put_unused_fd() + fput() */ | ||
480 | /* simulate an open() on the server side */ | ||
481 | vtpm_proxy_fops_open(file); | ||
482 | |||
483 | if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2) | ||
484 | proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2; | ||
485 | |||
486 | vtpm_proxy_work_start(proxy_dev); | ||
487 | |||
488 | vtpm_new_dev->fd = fd; | ||
489 | vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt); | ||
490 | vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt); | ||
491 | vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num; | ||
492 | |||
493 | return file; | ||
494 | |||
495 | err_put_unused_fd: | ||
496 | put_unused_fd(fd); | ||
497 | |||
498 | err_delete_proxy_dev: | ||
499 | vtpm_proxy_delete_proxy_dev(proxy_dev); | ||
500 | |||
501 | return ERR_PTR(rc); | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Counterpart to vtpm_proxy_create_device. | ||
506 | */ | ||
507 | static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev) | ||
508 | { | ||
509 | vtpm_proxy_work_stop(proxy_dev); | ||
510 | |||
511 | /* | ||
512 | * A client may hold the 'ops' lock, so let it know that the server | ||
513 | * side shuts down before we try to grab the 'ops' lock when | ||
514 | * unregistering the chip. | ||
515 | */ | ||
516 | vtpm_proxy_fops_undo_open(proxy_dev); | ||
517 | |||
518 | tpm_chip_unregister(proxy_dev->chip); | ||
519 | |||
520 | vtpm_proxy_delete_proxy_dev(proxy_dev); | ||
521 | } | ||
522 | |||
523 | /* | ||
524 | * Code related to the control device /dev/vtpmx | ||
525 | */ | ||
526 | |||
527 | /* | ||
528 | * vtpmx_fops_ioctl: ioctl on /dev/vtpmx | ||
529 | * | ||
530 | * Return value: | ||
531 | * Returns 0 on success, a negative error code otherwise. | ||
532 | */ | ||
533 | static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl, | ||
534 | unsigned long arg) | ||
535 | { | ||
536 | void __user *argp = (void __user *)arg; | ||
537 | struct vtpm_proxy_new_dev __user *vtpm_new_dev_p; | ||
538 | struct vtpm_proxy_new_dev vtpm_new_dev; | ||
539 | struct file *file; | ||
540 | |||
541 | switch (ioctl) { | ||
542 | case VTPM_PROXY_IOC_NEW_DEV: | ||
543 | if (!capable(CAP_SYS_ADMIN)) | ||
544 | return -EPERM; | ||
545 | vtpm_new_dev_p = argp; | ||
546 | if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p, | ||
547 | sizeof(vtpm_new_dev))) | ||
548 | return -EFAULT; | ||
549 | file = vtpm_proxy_create_device(&vtpm_new_dev); | ||
550 | if (IS_ERR(file)) | ||
551 | return PTR_ERR(file); | ||
552 | if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev, | ||
553 | sizeof(vtpm_new_dev))) { | ||
554 | put_unused_fd(vtpm_new_dev.fd); | ||
555 | fput(file); | ||
556 | return -EFAULT; | ||
557 | } | ||
558 | |||
559 | fd_install(vtpm_new_dev.fd, file); | ||
560 | return 0; | ||
561 | |||
562 | default: | ||
563 | return -ENOIOCTLCMD; | ||
564 | } | ||
565 | } | ||
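To make the ioctl flow above concrete, a user-space TPM emulator would create a device pair roughly as sketched below: open /dev/vtpmx, issue VTPM_PROXY_IOC_NEW_DEV, then service commands on the returned anonymous fd while the kernel exposes /dev/tpm<tpm_num> to clients. This is a hedged user-space sketch with minimal error handling, not code from this patch; the canned reply is a placeholder.

/* Hedged user-space sketch: not part of this patch */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	/* Placeholder reply: a minimal TPM 1.2 failure response
	 * (tag 0x00c4, size 10, return code TPM_FAIL); a real emulator
	 * would execute the command and answer properly. */
	static const unsigned char resp[10] = {
		0x00, 0xc4, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x09
	};
	struct vtpm_proxy_new_dev new_dev;
	unsigned char req[4096];
	ssize_t n;
	int ctrl;

	ctrl = open("/dev/vtpmx", O_RDWR);
	if (ctrl < 0)
		return 1;

	memset(&new_dev, 0, sizeof(new_dev));
	new_dev.flags = 0;	/* or VTPM_PROXY_FLAG_TPM2 for a TPM 2.0 emulator */
	if (ioctl(ctrl, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0)
		return 1;

	printf("backing /dev/tpm%u via fd %d\n", new_dev.tpm_num, new_dev.fd);

	for (;;) {
		n = read(new_dev.fd, req, sizeof(req));	/* blocks until a command arrives */
		if (n <= 0)
			break;
		if (write(new_dev.fd, resp, sizeof(resp)) < 0)	/* one response per command */
			break;
	}
	return 0;
}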
566 | |||
567 | #ifdef CONFIG_COMPAT | ||
568 | static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl, | ||
569 | unsigned long arg) | ||
570 | { | ||
571 | return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); | ||
572 | } | ||
573 | #endif | ||
574 | |||
575 | static const struct file_operations vtpmx_fops = { | ||
576 | .owner = THIS_MODULE, | ||
577 | .unlocked_ioctl = vtpmx_fops_ioctl, | ||
578 | #ifdef CONFIG_COMPAT | ||
579 | .compat_ioctl = vtpmx_fops_compat_ioctl, | ||
580 | #endif | ||
581 | .llseek = noop_llseek, | ||
582 | }; | ||
583 | |||
584 | static struct miscdevice vtpmx_miscdev = { | ||
585 | .minor = MISC_DYNAMIC_MINOR, | ||
586 | .name = "vtpmx", | ||
587 | .fops = &vtpmx_fops, | ||
588 | }; | ||
589 | |||
590 | static int vtpmx_init(void) | ||
591 | { | ||
592 | return misc_register(&vtpmx_miscdev); | ||
593 | } | ||
594 | |||
595 | static void vtpmx_cleanup(void) | ||
596 | { | ||
597 | misc_deregister(&vtpmx_miscdev); | ||
598 | } | ||
599 | |||
600 | static int __init vtpm_module_init(void) | ||
601 | { | ||
602 | int rc; | ||
603 | |||
604 | rc = vtpmx_init(); | ||
605 | if (rc) { | ||
606 | pr_err("couldn't create vtpmx device\n"); | ||
607 | return rc; | ||
608 | } | ||
609 | |||
610 | workqueue = create_workqueue("tpm-vtpm"); | ||
611 | if (!workqueue) { | ||
612 | pr_err("couldn't create workqueue\n"); | ||
613 | rc = -ENOMEM; | ||
614 | goto err_vtpmx_cleanup; | ||
615 | } | ||
616 | |||
617 | return 0; | ||
618 | |||
619 | err_vtpmx_cleanup: | ||
620 | vtpmx_cleanup(); | ||
621 | |||
622 | return rc; | ||
623 | } | ||
624 | |||
625 | static void __exit vtpm_module_exit(void) | ||
626 | { | ||
627 | destroy_workqueue(workqueue); | ||
628 | vtpmx_cleanup(); | ||
629 | } | ||
630 | |||
631 | module_init(vtpm_module_init); | ||
632 | module_exit(vtpm_module_exit); | ||
633 | |||
634 | MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)"); | ||
635 | MODULE_DESCRIPTION("vTPM Driver"); | ||
636 | MODULE_VERSION("0.1"); | ||
637 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 3111f2778079..62028f483bba 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c | |||
@@ -28,6 +28,8 @@ struct tpm_private { | |||
28 | unsigned int evtchn; | 28 | unsigned int evtchn; |
29 | int ring_ref; | 29 | int ring_ref; |
30 | domid_t backend_id; | 30 | domid_t backend_id; |
31 | int irq; | ||
32 | wait_queue_head_t read_queue; | ||
31 | }; | 33 | }; |
32 | 34 | ||
33 | enum status_bits { | 35 | enum status_bits { |
@@ -39,7 +41,7 @@ enum status_bits { | |||
39 | 41 | ||
40 | static u8 vtpm_status(struct tpm_chip *chip) | 42 | static u8 vtpm_status(struct tpm_chip *chip) |
41 | { | 43 | { |
42 | struct tpm_private *priv = TPM_VPRIV(chip); | 44 | struct tpm_private *priv = dev_get_drvdata(&chip->dev); |
43 | switch (priv->shr->state) { | 45 | switch (priv->shr->state) { |
44 | case VTPM_STATE_IDLE: | 46 | case VTPM_STATE_IDLE: |
45 | return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED; | 47 | return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED; |
@@ -60,7 +62,7 @@ static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status) | |||
60 | 62 | ||
61 | static void vtpm_cancel(struct tpm_chip *chip) | 63 | static void vtpm_cancel(struct tpm_chip *chip) |
62 | { | 64 | { |
63 | struct tpm_private *priv = TPM_VPRIV(chip); | 65 | struct tpm_private *priv = dev_get_drvdata(&chip->dev); |
64 | priv->shr->state = VTPM_STATE_CANCEL; | 66 | priv->shr->state = VTPM_STATE_CANCEL; |
65 | wmb(); | 67 | wmb(); |
66 | notify_remote_via_evtchn(priv->evtchn); | 68 | notify_remote_via_evtchn(priv->evtchn); |
@@ -73,7 +75,7 @@ static unsigned int shr_data_offset(struct vtpm_shared_page *shr) | |||
73 | 75 | ||
74 | static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | 76 | static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) |
75 | { | 77 | { |
76 | struct tpm_private *priv = TPM_VPRIV(chip); | 78 | struct tpm_private *priv = dev_get_drvdata(&chip->dev); |
77 | struct vtpm_shared_page *shr = priv->shr; | 79 | struct vtpm_shared_page *shr = priv->shr; |
78 | unsigned int offset = shr_data_offset(shr); | 80 | unsigned int offset = shr_data_offset(shr); |
79 | 81 | ||
@@ -87,8 +89,8 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
87 | return -EINVAL; | 89 | return -EINVAL; |
88 | 90 | ||
89 | /* Wait for completion of any existing command or cancellation */ | 91 | /* Wait for completion of any existing command or cancellation */ |
90 | if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c, | 92 | if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c, |
91 | &chip->vendor.read_queue, true) < 0) { | 93 | &priv->read_queue, true) < 0) { |
92 | vtpm_cancel(chip); | 94 | vtpm_cancel(chip); |
93 | return -ETIME; | 95 | return -ETIME; |
94 | } | 96 | } |
@@ -104,7 +106,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
104 | duration = tpm_calc_ordinal_duration(chip, ordinal); | 106 | duration = tpm_calc_ordinal_duration(chip, ordinal); |
105 | 107 | ||
106 | if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, | 108 | if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, |
107 | &chip->vendor.read_queue, true) < 0) { | 109 | &priv->read_queue, true) < 0) { |
108 | /* got a signal or timeout, try to cancel */ | 110 | /* got a signal or timeout, try to cancel */ |
109 | vtpm_cancel(chip); | 111 | vtpm_cancel(chip); |
110 | return -ETIME; | 112 | return -ETIME; |
@@ -115,7 +117,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
115 | 117 | ||
116 | static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 118 | static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
117 | { | 119 | { |
118 | struct tpm_private *priv = TPM_VPRIV(chip); | 120 | struct tpm_private *priv = dev_get_drvdata(&chip->dev); |
119 | struct vtpm_shared_page *shr = priv->shr; | 121 | struct vtpm_shared_page *shr = priv->shr; |
120 | unsigned int offset = shr_data_offset(shr); | 122 | unsigned int offset = shr_data_offset(shr); |
121 | size_t length = shr->length; | 123 | size_t length = shr->length; |
@@ -124,8 +126,8 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
124 | return -ECANCELED; | 126 | return -ECANCELED; |
125 | 127 | ||
126 | /* In theory the wait at the end of _send makes this one unnecessary */ | 128 | /* In theory the wait at the end of _send makes this one unnecessary */ |
127 | if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c, | 129 | if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c, |
128 | &chip->vendor.read_queue, true) < 0) { | 130 | &priv->read_queue, true) < 0) { |
129 | vtpm_cancel(chip); | 131 | vtpm_cancel(chip); |
130 | return -ETIME; | 132 | return -ETIME; |
131 | } | 133 | } |
@@ -161,7 +163,7 @@ static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) | |||
161 | switch (priv->shr->state) { | 163 | switch (priv->shr->state) { |
162 | case VTPM_STATE_IDLE: | 164 | case VTPM_STATE_IDLE: |
163 | case VTPM_STATE_FINISH: | 165 | case VTPM_STATE_FINISH: |
164 | wake_up_interruptible(&priv->chip->vendor.read_queue); | 166 | wake_up_interruptible(&priv->read_queue); |
165 | break; | 167 | break; |
166 | case VTPM_STATE_SUBMIT: | 168 | case VTPM_STATE_SUBMIT: |
167 | case VTPM_STATE_CANCEL: | 169 | case VTPM_STATE_CANCEL: |
@@ -179,10 +181,10 @@ static int setup_chip(struct device *dev, struct tpm_private *priv) | |||
179 | if (IS_ERR(chip)) | 181 | if (IS_ERR(chip)) |
180 | return PTR_ERR(chip); | 182 | return PTR_ERR(chip); |
181 | 183 | ||
182 | init_waitqueue_head(&chip->vendor.read_queue); | 184 | init_waitqueue_head(&priv->read_queue); |
183 | 185 | ||
184 | priv->chip = chip; | 186 | priv->chip = chip; |
185 | TPM_VPRIV(chip) = priv; | 187 | dev_set_drvdata(&chip->dev, priv); |
186 | 188 | ||
187 | return 0; | 189 | return 0; |
188 | } | 190 | } |
@@ -217,7 +219,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) | |||
217 | xenbus_dev_fatal(dev, rv, "allocating TPM irq"); | 219 | xenbus_dev_fatal(dev, rv, "allocating TPM irq"); |
218 | return rv; | 220 | return rv; |
219 | } | 221 | } |
220 | priv->chip->vendor.irq = rv; | 222 | priv->irq = rv; |
221 | 223 | ||
222 | again: | 224 | again: |
223 | rv = xenbus_transaction_start(&xbt); | 225 | rv = xenbus_transaction_start(&xbt); |
@@ -277,8 +279,8 @@ static void ring_free(struct tpm_private *priv) | |||
277 | else | 279 | else |
278 | free_page((unsigned long)priv->shr); | 280 | free_page((unsigned long)priv->shr); |
279 | 281 | ||
280 | if (priv->chip && priv->chip->vendor.irq) | 282 | if (priv->irq) |
281 | unbind_from_irqhandler(priv->chip->vendor.irq, priv); | 283 | unbind_from_irqhandler(priv->irq, priv); |
282 | 284 | ||
283 | kfree(priv); | 285 | kfree(priv); |
284 | } | 286 | } |
@@ -318,10 +320,10 @@ static int tpmfront_probe(struct xenbus_device *dev, | |||
318 | static int tpmfront_remove(struct xenbus_device *dev) | 320 | static int tpmfront_remove(struct xenbus_device *dev) |
319 | { | 321 | { |
320 | struct tpm_chip *chip = dev_get_drvdata(&dev->dev); | 322 | struct tpm_chip *chip = dev_get_drvdata(&dev->dev); |
321 | struct tpm_private *priv = TPM_VPRIV(chip); | 323 | struct tpm_private *priv = dev_get_drvdata(&chip->dev); |
322 | tpm_chip_unregister(chip); | 324 | tpm_chip_unregister(chip); |
323 | ring_free(priv); | 325 | ring_free(priv); |
324 | TPM_VPRIV(chip) = NULL; | 326 | dev_set_drvdata(&chip->dev, NULL); |
325 | return 0; | 327 | return 0; |
326 | } | 328 | } |
327 | 329 | ||
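The xen-tpmfront hunks above follow the same conversion applied across the TPM drivers in this pull: the removed chip->vendor fields and the TPM_VPRIV() accessor are replaced by driver-private state attached to the chip device via dev_set_drvdata()/dev_get_drvdata(). A minimal sketch of the pattern for a hypothetical driver (the my_tpm_* names and register layout are illustrative, not part of this series):

struct my_tpm_priv {
	int irq;
	void __iomem *status_reg;
	wait_queue_head_t read_queue;
};

static u8 my_tpm_status(struct tpm_chip *chip)
{
	struct my_tpm_priv *priv = dev_get_drvdata(&chip->dev);

	/* poll the device through per-driver state instead of chip->vendor */
	return readb(priv->status_reg);
}

static int my_tpm_setup_chip(struct device *dev, struct my_tpm_priv *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &my_tpm_ops);	/* my_tpm_ops assumed defined below */
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&priv->read_queue);
	dev_set_drvdata(&chip->dev, priv);		/* was: TPM_VPRIV(chip) = priv */
	return 0;
}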
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index fc4875433817..5de0673f333b 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h | |||
@@ -51,7 +51,7 @@ struct krb5_principal { | |||
51 | struct krb5_tagged_data { | 51 | struct krb5_tagged_data { |
52 | /* for tag value, see /usr/include/krb5/krb5.h | 52 | /* for tag value, see /usr/include/krb5/krb5.h |
53 | * - KRB5_AUTHDATA_* for auth data | 53 | * - KRB5_AUTHDATA_* for auth data |
54 | * - | 54 | * - |
55 | */ | 55 | */ |
56 | s32 tag; | 56 | s32 tag; |
57 | u32 data_len; | 57 | u32 data_len; |
diff --git a/include/linux/capability.h b/include/linux/capability.h index 00690ff92edf..5f3c63dde2d5 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -206,6 +206,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, | |||
206 | struct user_namespace *ns, int cap); | 206 | struct user_namespace *ns, int cap); |
207 | extern bool capable(int cap); | 207 | extern bool capable(int cap); |
208 | extern bool ns_capable(struct user_namespace *ns, int cap); | 208 | extern bool ns_capable(struct user_namespace *ns, int cap); |
209 | extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); | ||
209 | #else | 210 | #else |
210 | static inline bool has_capability(struct task_struct *t, int cap) | 211 | static inline bool has_capability(struct task_struct *t, int cap) |
211 | { | 212 | { |
@@ -233,6 +234,10 @@ static inline bool ns_capable(struct user_namespace *ns, int cap) | |||
233 | { | 234 | { |
234 | return true; | 235 | return true; |
235 | } | 236 | } |
237 | static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) | ||
238 | { | ||
239 | return true; | ||
240 | } | ||
236 | #endif /* CONFIG_MULTIUSER */ | 241 | #endif /* CONFIG_MULTIUSER */ |
237 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); | 242 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); |
238 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); | 243 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); |
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h index 817dfdb37885..6f0fb6ebd7db 100644 --- a/include/linux/platform_data/st33zp24.h +++ b/include/linux/platform_data/st33zp24.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 | 2 | * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 |
3 | * Copyright (C) 2009 - 2015 STMicroelectronics | 3 | * Copyright (C) 2009 - 2016 STMicroelectronics |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 2296e6b2f690..ecc296c137cd 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
@@ -28,19 +28,13 @@ struct seccomp { | |||
28 | }; | 28 | }; |
29 | 29 | ||
30 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER | 30 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER |
31 | extern int __secure_computing(void); | 31 | extern int __secure_computing(const struct seccomp_data *sd); |
32 | static inline int secure_computing(void) | 32 | static inline int secure_computing(const struct seccomp_data *sd) |
33 | { | 33 | { |
34 | if (unlikely(test_thread_flag(TIF_SECCOMP))) | 34 | if (unlikely(test_thread_flag(TIF_SECCOMP))) |
35 | return __secure_computing(); | 35 | return __secure_computing(sd); |
36 | return 0; | 36 | return 0; |
37 | } | 37 | } |
38 | |||
39 | #define SECCOMP_PHASE1_OK 0 | ||
40 | #define SECCOMP_PHASE1_SKIP 1 | ||
41 | |||
42 | extern u32 seccomp_phase1(struct seccomp_data *sd); | ||
43 | int seccomp_phase2(u32 phase1_result); | ||
44 | #else | 38 | #else |
45 | extern void secure_computing_strict(int this_syscall); | 39 | extern void secure_computing_strict(int this_syscall); |
46 | #endif | 40 | #endif |
@@ -61,7 +55,7 @@ struct seccomp { }; | |||
61 | struct seccomp_filter { }; | 55 | struct seccomp_filter { }; |
62 | 56 | ||
63 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER | 57 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER |
64 | static inline int secure_computing(void) { return 0; } | 58 | static inline int secure_computing(struct seccomp_data *sd) { return 0; } |
65 | #else | 59 | #else |
66 | static inline void secure_computing_strict(int this_syscall) { return; } | 60 | static inline void secure_computing_strict(int this_syscall) { return; } |
67 | #endif | 61 | #endif |
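With the two-phase seccomp API removed, an architecture's syscall-entry path makes a single secure_computing() call and passes along whatever seccomp_data it has already assembled. A rough sketch of such a caller, assuming the usual asm-generic/syscall.h helpers are available (illustrative only, not any particular arch's code):

static long my_arch_seccomp_hook(struct pt_regs *regs)
{
	struct seccomp_data sd;
	unsigned long args[6];
	int i;

	sd.nr = syscall_get_nr(current, regs);
	sd.arch = syscall_get_arch();
	sd.instruction_pointer = instruction_pointer(regs);
	syscall_get_arguments(current, regs, 0, 6, args);
	for (i = 0; i < 6; i++)
		sd.args[i] = args[i];

	/* returns -1 when seccomp decided the syscall must be skipped */
	if (secure_computing(&sd) == -1)
		return -1;

	return sd.nr;
}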
diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 706e63eea080..da158f06e0b2 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h | |||
@@ -33,7 +33,12 @@ struct tpm_chip; | |||
33 | struct trusted_key_payload; | 33 | struct trusted_key_payload; |
34 | struct trusted_key_options; | 34 | struct trusted_key_options; |
35 | 35 | ||
36 | enum TPM_OPS_FLAGS { | ||
37 | TPM_OPS_AUTO_STARTUP = BIT(0), | ||
38 | }; | ||
39 | |||
36 | struct tpm_class_ops { | 40 | struct tpm_class_ops { |
41 | unsigned int flags; | ||
37 | const u8 req_complete_mask; | 42 | const u8 req_complete_mask; |
38 | const u8 req_complete_val; | 43 | const u8 req_complete_val; |
39 | bool (*req_canceled)(struct tpm_chip *chip, u8 status); | 44 | bool (*req_canceled)(struct tpm_chip *chip, u8 status); |
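TPM_OPS_AUTO_STARTUP pairs with the factored-out startup code elsewhere in this series: a driver that sets the flag in its tpm_class_ops asks the core to issue the TPM 1.2/2.0 startup sequence for it during registration. A hypothetical example (the my_tpm_* handlers and MY_STS_* bits are placeholders):

static const struct tpm_class_ops my_tpm_ops = {
	.flags		   = TPM_OPS_AUTO_STARTUP,
	.status		   = my_tpm_status,
	.recv		   = my_tpm_recv,
	.send		   = my_tpm_send,
	.cancel		   = my_tpm_cancel,
	.req_complete_mask = MY_STS_VALID | MY_STS_DATA_AVAIL,
	.req_complete_val  = MY_STS_VALID | MY_STS_DATA_AVAIL,
	.req_canceled	   = my_tpm_req_canceled,
};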
diff --git a/include/net/calipso.h b/include/net/calipso.h new file mode 100644 index 000000000000..b1b30cd36601 --- /dev/null +++ b/include/net/calipso.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * CALIPSO - Common Architecture Label IPv6 Security Option | ||
3 | * | ||
4 | * This is an implementation of the CALIPSO protocol as specified in | ||
5 | * RFC 5570. | ||
6 | * | ||
7 | * Authors: Paul Moore <paul@paul-moore.com> | ||
8 | * Huw Davies <huw@codeweavers.com> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 | ||
14 | * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015 | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License as published by | ||
18 | * the Free Software Foundation; either version 2 of the License, or | ||
19 | * (at your option) any later version. | ||
20 | * | ||
21 | * This program is distributed in the hope that it will be useful, | ||
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
24 | * the GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #ifndef _CALIPSO_H | ||
32 | #define _CALIPSO_H | ||
33 | |||
34 | #include <linux/types.h> | ||
35 | #include <linux/rcupdate.h> | ||
36 | #include <linux/list.h> | ||
37 | #include <linux/net.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <net/netlabel.h> | ||
40 | #include <net/request_sock.h> | ||
41 | #include <linux/atomic.h> | ||
42 | #include <asm/unaligned.h> | ||
43 | |||
44 | /* known doi values */ | ||
45 | #define CALIPSO_DOI_UNKNOWN 0x00000000 | ||
46 | |||
47 | /* doi mapping types */ | ||
48 | #define CALIPSO_MAP_UNKNOWN 0 | ||
49 | #define CALIPSO_MAP_PASS 2 | ||
50 | |||
51 | /* | ||
52 | * CALIPSO DOI definitions | ||
53 | */ | ||
54 | |||
55 | /* DOI definition struct */ | ||
56 | struct calipso_doi { | ||
57 | u32 doi; | ||
58 | u32 type; | ||
59 | |||
60 | atomic_t refcount; | ||
61 | struct list_head list; | ||
62 | struct rcu_head rcu; | ||
63 | }; | ||
64 | |||
65 | /* | ||
66 | * Sysctl Variables | ||
67 | */ | ||
68 | extern int calipso_cache_enabled; | ||
69 | extern int calipso_cache_bucketsize; | ||
70 | |||
71 | #ifdef CONFIG_NETLABEL | ||
72 | int __init calipso_init(void); | ||
73 | void calipso_exit(void); | ||
74 | bool calipso_validate(const struct sk_buff *skb, const unsigned char *option); | ||
75 | #else | ||
76 | static inline int __init calipso_init(void) | ||
77 | { | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static inline void calipso_exit(void) | ||
82 | { | ||
83 | } | ||
84 | static inline bool calipso_validate(const struct sk_buff *skb, | ||
85 | const unsigned char *option) | ||
86 | { | ||
87 | return true; | ||
88 | } | ||
89 | #endif /* CONFIG_NETLABEL */ | ||
90 | |||
91 | #endif /* _CALIPSO_H */ | ||
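calipso_validate() is the hook the IPv6 option-parsing code can use to vet an incoming CALIPSO TLV; note that the !CONFIG_NETLABEL stub above simply accepts everything. A hedged sketch of a caller (the handler name and surrounding checks are assumptions, not the actual exthdrs change from this series):

static bool my_hop_calipso(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	/* drop the packet if the CALIPSO option fails validation */
	if (!calipso_validate(skb, nh + optoff)) {
		kfree_skb(skb);
		return false;
	}
	return true;
}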
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 012b1f91f3ec..236a81034fef 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -97,7 +97,12 @@ struct inet_request_sock { | |||
97 | u32 ir_mark; | 97 | u32 ir_mark; |
98 | union { | 98 | union { |
99 | struct ip_options_rcu *opt; | 99 | struct ip_options_rcu *opt; |
100 | struct sk_buff *pktopts; | 100 | #if IS_ENABLED(CONFIG_IPV6) |
101 | struct { | ||
102 | struct ipv6_txoptions *ipv6_opt; | ||
103 | struct sk_buff *pktopts; | ||
104 | }; | ||
105 | #endif | ||
101 | }; | 106 | }; |
102 | }; | 107 | }; |
103 | 108 | ||
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 11a045281948..8fed1cd78658 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -313,11 +313,19 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, | |||
313 | int newtype, | 313 | int newtype, |
314 | struct ipv6_opt_hdr __user *newopt, | 314 | struct ipv6_opt_hdr __user *newopt, |
315 | int newoptlen); | 315 | int newoptlen); |
316 | struct ipv6_txoptions * | ||
317 | ipv6_renew_options_kern(struct sock *sk, | ||
318 | struct ipv6_txoptions *opt, | ||
319 | int newtype, | ||
320 | struct ipv6_opt_hdr *newopt, | ||
321 | int newoptlen); | ||
316 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, | 322 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, |
317 | struct ipv6_txoptions *opt); | 323 | struct ipv6_txoptions *opt); |
318 | 324 | ||
319 | bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, | 325 | bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, |
320 | const struct inet6_skb_parm *opt); | 326 | const struct inet6_skb_parm *opt); |
327 | struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | ||
328 | struct ipv6_txoptions *opt); | ||
321 | 329 | ||
322 | static inline bool ipv6_accept_ra(struct inet6_dev *idev) | 330 | static inline bool ipv6_accept_ra(struct inet6_dev *idev) |
323 | { | 331 | { |
@@ -943,7 +951,7 @@ enum { | |||
943 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, | 951 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, |
944 | unsigned short *fragoff, int *fragflg); | 952 | unsigned short *fragoff, int *fragflg); |
945 | 953 | ||
946 | int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); | 954 | int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type); |
947 | 955 | ||
948 | struct in6_addr *fl6_update_dst(struct flowi6 *fl6, | 956 | struct in6_addr *fl6_update_dst(struct flowi6 *fl6, |
949 | const struct ipv6_txoptions *opt, | 957 | const struct ipv6_txoptions *opt, |
diff --git a/include/net/netlabel.h b/include/net/netlabel.h index 7b5a300de7f5..efe98068880f 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/atomic.h> | 40 | #include <linux/atomic.h> |
41 | 41 | ||
42 | struct cipso_v4_doi; | 42 | struct cipso_v4_doi; |
43 | struct calipso_doi; | ||
43 | 44 | ||
44 | /* | 45 | /* |
45 | * NetLabel - A management interface for maintaining network packet label | 46 | * NetLabel - A management interface for maintaining network packet label |
@@ -94,6 +95,8 @@ struct cipso_v4_doi; | |||
94 | #define NETLBL_NLTYPE_UNLABELED_NAME "NLBL_UNLBL" | 95 | #define NETLBL_NLTYPE_UNLABELED_NAME "NLBL_UNLBL" |
95 | #define NETLBL_NLTYPE_ADDRSELECT 6 | 96 | #define NETLBL_NLTYPE_ADDRSELECT 6 |
96 | #define NETLBL_NLTYPE_ADDRSELECT_NAME "NLBL_ADRSEL" | 97 | #define NETLBL_NLTYPE_ADDRSELECT_NAME "NLBL_ADRSEL" |
98 | #define NETLBL_NLTYPE_CALIPSO 7 | ||
99 | #define NETLBL_NLTYPE_CALIPSO_NAME "NLBL_CALIPSO" | ||
97 | 100 | ||
98 | /* | 101 | /* |
99 | * NetLabel - Kernel API for accessing the network packet label mappings. | 102 | * NetLabel - Kernel API for accessing the network packet label mappings. |
@@ -216,6 +219,63 @@ struct netlbl_lsm_secattr { | |||
216 | } attr; | 219 | } attr; |
217 | }; | 220 | }; |
218 | 221 | ||
222 | /** | ||
223 | * struct netlbl_calipso_ops - NetLabel CALIPSO operations | ||
224 | * @doi_add: add a CALIPSO DOI | ||
225 | * @doi_free: free a CALIPSO DOI | ||
226 | * @doi_getdef: returns a reference to a DOI | ||
227 | * @doi_putdef: releases a reference of a DOI | ||
228 | * @doi_walk: enumerate the DOI list | ||
229 | * @sock_getattr: retrieve the socket's attr | ||
230 | * @sock_setattr: set the socket's attr | ||
231 | * @sock_delattr: remove the socket's attr | ||
232 | * @req_setattr: set the req socket's attr | ||
233 | * @req_delattr: remove the req socket's attr | ||
234 | * @opt_getattr: retrieve attr from memory block | ||
235 | * @skbuff_optptr: find option in packet | ||
236 | * @skbuff_setattr: set the skbuff's attr | ||
237 | * @skbuff_delattr: remove the skbuff's attr | ||
238 | * @cache_invalidate: invalidate cache | ||
239 | * @cache_add: add cache entry | ||
240 | * | ||
241 | * Description: | ||
242 | * This structure is filled out by the CALIPSO engine and passed | ||
243 | * to the NetLabel core via a call to netlbl_calipso_ops_register(). | ||
244 | * It enables the CALIPSO engine (and hence IPv6) to be compiled | ||
245 | * as a module. | ||
246 | */ | ||
247 | struct netlbl_calipso_ops { | ||
248 | int (*doi_add)(struct calipso_doi *doi_def, | ||
249 | struct netlbl_audit *audit_info); | ||
250 | void (*doi_free)(struct calipso_doi *doi_def); | ||
251 | int (*doi_remove)(u32 doi, struct netlbl_audit *audit_info); | ||
252 | struct calipso_doi *(*doi_getdef)(u32 doi); | ||
253 | void (*doi_putdef)(struct calipso_doi *doi_def); | ||
254 | int (*doi_walk)(u32 *skip_cnt, | ||
255 | int (*callback)(struct calipso_doi *doi_def, void *arg), | ||
256 | void *cb_arg); | ||
257 | int (*sock_getattr)(struct sock *sk, | ||
258 | struct netlbl_lsm_secattr *secattr); | ||
259 | int (*sock_setattr)(struct sock *sk, | ||
260 | const struct calipso_doi *doi_def, | ||
261 | const struct netlbl_lsm_secattr *secattr); | ||
262 | void (*sock_delattr)(struct sock *sk); | ||
263 | int (*req_setattr)(struct request_sock *req, | ||
264 | const struct calipso_doi *doi_def, | ||
265 | const struct netlbl_lsm_secattr *secattr); | ||
266 | void (*req_delattr)(struct request_sock *req); | ||
267 | int (*opt_getattr)(const unsigned char *calipso, | ||
268 | struct netlbl_lsm_secattr *secattr); | ||
269 | unsigned char *(*skbuff_optptr)(const struct sk_buff *skb); | ||
270 | int (*skbuff_setattr)(struct sk_buff *skb, | ||
271 | const struct calipso_doi *doi_def, | ||
272 | const struct netlbl_lsm_secattr *secattr); | ||
273 | int (*skbuff_delattr)(struct sk_buff *skb); | ||
274 | void (*cache_invalidate)(void); | ||
275 | int (*cache_add)(const unsigned char *calipso_ptr, | ||
276 | const struct netlbl_lsm_secattr *secattr); | ||
277 | }; | ||
278 | |||
219 | /* | 279 | /* |
220 | * LSM security attribute operations (inline) | 280 | * LSM security attribute operations (inline) |
221 | */ | 281 | */ |
@@ -385,6 +445,14 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
385 | const struct in_addr *addr, | 445 | const struct in_addr *addr, |
386 | const struct in_addr *mask, | 446 | const struct in_addr *mask, |
387 | struct netlbl_audit *audit_info); | 447 | struct netlbl_audit *audit_info); |
448 | int netlbl_cfg_calipso_add(struct calipso_doi *doi_def, | ||
449 | struct netlbl_audit *audit_info); | ||
450 | void netlbl_cfg_calipso_del(u32 doi, struct netlbl_audit *audit_info); | ||
451 | int netlbl_cfg_calipso_map_add(u32 doi, | ||
452 | const char *domain, | ||
453 | const struct in6_addr *addr, | ||
454 | const struct in6_addr *mask, | ||
455 | struct netlbl_audit *audit_info); | ||
388 | /* | 456 | /* |
389 | * LSM security attribute operations | 457 | * LSM security attribute operations |
390 | */ | 458 | */ |
@@ -405,6 +473,12 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap, | |||
405 | unsigned long bitmap, | 473 | unsigned long bitmap, |
406 | gfp_t flags); | 474 | gfp_t flags); |
407 | 475 | ||
476 | /* Bitmap functions | ||
477 | */ | ||
478 | int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, | ||
479 | u32 offset, u8 state); | ||
480 | void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state); | ||
481 | |||
408 | /* | 482 | /* |
409 | * LSM protocol operations (NetLabel LSM/kernel API) | 483 | * LSM protocol operations (NetLabel LSM/kernel API) |
410 | */ | 484 | */ |
@@ -427,13 +501,13 @@ int netlbl_skbuff_setattr(struct sk_buff *skb, | |||
427 | int netlbl_skbuff_getattr(const struct sk_buff *skb, | 501 | int netlbl_skbuff_getattr(const struct sk_buff *skb, |
428 | u16 family, | 502 | u16 family, |
429 | struct netlbl_lsm_secattr *secattr); | 503 | struct netlbl_lsm_secattr *secattr); |
430 | void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway); | 504 | void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway); |
431 | 505 | ||
432 | /* | 506 | /* |
433 | * LSM label mapping cache operations | 507 | * LSM label mapping cache operations |
434 | */ | 508 | */ |
435 | void netlbl_cache_invalidate(void); | 509 | void netlbl_cache_invalidate(void); |
436 | int netlbl_cache_add(const struct sk_buff *skb, | 510 | int netlbl_cache_add(const struct sk_buff *skb, u16 family, |
437 | const struct netlbl_lsm_secattr *secattr); | 511 | const struct netlbl_lsm_secattr *secattr); |
438 | 512 | ||
439 | /* | 513 | /* |
@@ -495,6 +569,24 @@ static inline int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
495 | { | 569 | { |
496 | return -ENOSYS; | 570 | return -ENOSYS; |
497 | } | 571 | } |
572 | static inline int netlbl_cfg_calipso_add(struct calipso_doi *doi_def, | ||
573 | struct netlbl_audit *audit_info) | ||
574 | { | ||
575 | return -ENOSYS; | ||
576 | } | ||
577 | static inline void netlbl_cfg_calipso_del(u32 doi, | ||
578 | struct netlbl_audit *audit_info) | ||
579 | { | ||
580 | return; | ||
581 | } | ||
582 | static inline int netlbl_cfg_calipso_map_add(u32 doi, | ||
583 | const char *domain, | ||
584 | const struct in6_addr *addr, | ||
585 | const struct in6_addr *mask, | ||
586 | struct netlbl_audit *audit_info) | ||
587 | { | ||
588 | return -ENOSYS; | ||
589 | } | ||
498 | static inline int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, | 590 | static inline int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, |
499 | u32 offset) | 591 | u32 offset) |
500 | { | 592 | { |
@@ -586,7 +678,7 @@ static inline void netlbl_cache_invalidate(void) | |||
586 | { | 678 | { |
587 | return; | 679 | return; |
588 | } | 680 | } |
589 | static inline int netlbl_cache_add(const struct sk_buff *skb, | 681 | static inline int netlbl_cache_add(const struct sk_buff *skb, u16 family, |
590 | const struct netlbl_lsm_secattr *secattr) | 682 | const struct netlbl_lsm_secattr *secattr) |
591 | { | 683 | { |
592 | return 0; | 684 | return 0; |
@@ -598,4 +690,7 @@ static inline struct audit_buffer *netlbl_audit_start(int type, | |||
598 | } | 690 | } |
599 | #endif /* CONFIG_NETLABEL */ | 691 | #endif /* CONFIG_NETLABEL */ |
600 | 692 | ||
693 | const struct netlbl_calipso_ops * | ||
694 | netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops); | ||
695 | |||
601 | #endif /* _NETLABEL_H */ | 696 | #endif /* _NETLABEL_H */ |
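netlbl_calipso_ops_register() is what lets the CALIPSO engine live in the (potentially modular) IPv6 code while the NetLabel core stays built in: IPv6 fills in a netlbl_calipso_ops and hands it over at init time. Sketch only; the my_* callbacks stand in for the real implementations added in net/ipv6/calipso.c:

static const struct netlbl_calipso_ops my_calipso_ops = {
	.doi_add	  = my_doi_add,
	.doi_free	  = my_doi_free,
	.doi_remove	  = my_doi_remove,
	.doi_getdef	  = my_doi_getdef,
	.doi_putdef	  = my_doi_putdef,
	.doi_walk	  = my_doi_walk,
	.sock_getattr	  = my_sock_getattr,
	.sock_setattr	  = my_sock_setattr,
	.sock_delattr	  = my_sock_delattr,
	.req_setattr	  = my_req_setattr,
	.req_delattr	  = my_req_delattr,
	.opt_getattr	  = my_opt_getattr,
	.skbuff_optptr	  = my_skbuff_optptr,
	.skbuff_setattr	  = my_skbuff_setattr,
	.skbuff_delattr	  = my_skbuff_delattr,
	.cache_invalidate = my_cache_invalidate,
	.cache_add	  = my_cache_add,
};

int __init my_calipso_init(void)
{
	/* hand the operations to the NetLabel core; the previously
	 * registered ops pointer (if any) is returned */
	netlbl_calipso_ops_register(&my_calipso_ops);
	return 0;
}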
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index ec10cfef166a..6d4e92ccdc91 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
@@ -455,6 +455,7 @@ header-y += virtio_scsi.h | |||
455 | header-y += virtio_types.h | 455 | header-y += virtio_types.h |
456 | header-y += vm_sockets.h | 456 | header-y += vm_sockets.h |
457 | header-y += vt.h | 457 | header-y += vt.h |
458 | header-y += vtpm_proxy.h | ||
458 | header-y += wait.h | 459 | header-y += wait.h |
459 | header-y += wanrouter.h | 460 | header-y += wanrouter.h |
460 | header-y += watchdog.h | 461 | header-y += watchdog.h |
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d820aa979620..82e8aa59446b 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h | |||
@@ -130,6 +130,8 @@ | |||
130 | #define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ | 130 | #define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ |
131 | #define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */ | 131 | #define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */ |
132 | #define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ | 132 | #define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ |
133 | #define AUDIT_MAC_CALIPSO_ADD 1418 /* NetLabel: add CALIPSO DOI entry */ | ||
134 | #define AUDIT_MAC_CALIPSO_DEL 1419 /* NetLabel: del CALIPSO DOI entry */ | ||
133 | 135 | ||
134 | #define AUDIT_FIRST_KERN_ANOM_MSG 1700 | 136 | #define AUDIT_FIRST_KERN_ANOM_MSG 1700 |
135 | #define AUDIT_LAST_KERN_ANOM_MSG 1799 | 137 | #define AUDIT_LAST_KERN_ANOM_MSG 1799 |
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index 318a4828bf98..b39ea4f2e701 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h | |||
@@ -143,6 +143,7 @@ struct in6_flowlabel_req { | |||
143 | #define IPV6_TLV_PAD1 0 | 143 | #define IPV6_TLV_PAD1 0 |
144 | #define IPV6_TLV_PADN 1 | 144 | #define IPV6_TLV_PADN 1 |
145 | #define IPV6_TLV_ROUTERALERT 5 | 145 | #define IPV6_TLV_ROUTERALERT 5 |
146 | #define IPV6_TLV_CALIPSO 7 /* RFC 5570 */ | ||
146 | #define IPV6_TLV_JUMBO 194 | 147 | #define IPV6_TLV_JUMBO 194 |
147 | #define IPV6_TLV_HAO 201 /* home address option */ | 148 | #define IPV6_TLV_HAO 201 /* home address option */ |
148 | 149 | ||
diff --git a/include/uapi/linux/vtpm_proxy.h b/include/uapi/linux/vtpm_proxy.h new file mode 100644 index 000000000000..41e8e2252a30 --- /dev/null +++ b/include/uapi/linux/vtpm_proxy.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Definitions for the VTPM proxy driver | ||
3 | * Copyright (c) 2015, 2016, IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _UAPI_LINUX_VTPM_PROXY_H | ||
16 | #define _UAPI_LINUX_VTPM_PROXY_H | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/ioctl.h> | ||
20 | |||
21 | /* ioctls */ | ||
22 | |||
23 | struct vtpm_proxy_new_dev { | ||
24 | __u32 flags; /* input */ | ||
25 | __u32 tpm_num; /* output */ | ||
26 | __u32 fd; /* output */ | ||
27 | __u32 major; /* output */ | ||
28 | __u32 minor; /* output */ | ||
29 | }; | ||
30 | |||
31 | /* above flags */ | ||
32 | #define VTPM_PROXY_FLAG_TPM2 1 /* emulator is TPM 2 */ | ||
33 | |||
34 | #define VTPM_PROXY_IOC_NEW_DEV _IOWR(0xa1, 0x00, struct vtpm_proxy_new_dev) | ||
35 | |||
36 | #endif /* _UAPI_LINUX_VTPM_PROXY_H */ | ||
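The new UAPI header is driven from userspace with a single ioctl that creates a client/server vTPM device pair. A small usage sketch, assuming the proxy driver exposes its control node as /dev/vtpmx (the node name is not defined by this header):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	struct vtpm_proxy_new_dev new_dev = { .flags = VTPM_PROXY_FLAG_TPM2 };
	int fd = open("/dev/vtpmx", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0)
		return 1;

	/* new_dev.fd is the server (emulator) end; /dev/tpm<tpm_num> is the client end */
	printf("created /dev/tpm%u (server fd %u, chardev %u:%u)\n",
	       new_dev.tpm_num, new_dev.fd, new_dev.major, new_dev.minor);
	return 0;
}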
diff --git a/kernel/capability.c b/kernel/capability.c index 45432b54d5c6..00411c82dac5 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -361,6 +361,24 @@ bool has_capability_noaudit(struct task_struct *t, int cap) | |||
361 | return has_ns_capability_noaudit(t, &init_user_ns, cap); | 361 | return has_ns_capability_noaudit(t, &init_user_ns, cap); |
362 | } | 362 | } |
363 | 363 | ||
364 | static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit) | ||
365 | { | ||
366 | int capable; | ||
367 | |||
368 | if (unlikely(!cap_valid(cap))) { | ||
369 | pr_crit("capable() called with invalid cap=%u\n", cap); | ||
370 | BUG(); | ||
371 | } | ||
372 | |||
373 | capable = audit ? security_capable(current_cred(), ns, cap) : | ||
374 | security_capable_noaudit(current_cred(), ns, cap); | ||
375 | if (capable == 0) { | ||
376 | current->flags |= PF_SUPERPRIV; | ||
377 | return true; | ||
378 | } | ||
379 | return false; | ||
380 | } | ||
381 | |||
364 | /** | 382 | /** |
365 | * ns_capable - Determine if the current task has a superior capability in effect | 383 | * ns_capable - Determine if the current task has a superior capability in effect |
366 | * @ns: The usernamespace we want the capability in | 384 | * @ns: The usernamespace we want the capability in |
@@ -374,19 +392,27 @@ bool has_capability_noaudit(struct task_struct *t, int cap) | |||
374 | */ | 392 | */ |
375 | bool ns_capable(struct user_namespace *ns, int cap) | 393 | bool ns_capable(struct user_namespace *ns, int cap) |
376 | { | 394 | { |
377 | if (unlikely(!cap_valid(cap))) { | 395 | return ns_capable_common(ns, cap, true); |
378 | pr_crit("capable() called with invalid cap=%u\n", cap); | ||
379 | BUG(); | ||
380 | } | ||
381 | |||
382 | if (security_capable(current_cred(), ns, cap) == 0) { | ||
383 | current->flags |= PF_SUPERPRIV; | ||
384 | return true; | ||
385 | } | ||
386 | return false; | ||
387 | } | 396 | } |
388 | EXPORT_SYMBOL(ns_capable); | 397 | EXPORT_SYMBOL(ns_capable); |
389 | 398 | ||
399 | /** | ||
400 | * ns_capable_noaudit - Determine if the current task has a superior capability | ||
401 | * (unaudited) in effect | ||
402 | * @ns: The usernamespace we want the capability in | ||
403 | * @cap: The capability to be tested for | ||
404 | * | ||
405 | * Return true if the current task has the given superior capability currently | ||
406 | * available for use, false if not. | ||
407 | * | ||
408 | * This sets PF_SUPERPRIV on the task if the capability is available on the | ||
409 | * assumption that it's about to be used. | ||
410 | */ | ||
411 | bool ns_capable_noaudit(struct user_namespace *ns, int cap) | ||
412 | { | ||
413 | return ns_capable_common(ns, cap, false); | ||
414 | } | ||
415 | EXPORT_SYMBOL(ns_capable_noaudit); | ||
390 | 416 | ||
391 | /** | 417 | /** |
392 | * capable - Determine if the current task has a superior capability in effect | 418 | * capable - Determine if the current task has a superior capability in effect |
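ns_capable_noaudit() is meant for callers that probe for a capability opportunistically and have a legitimate fallback, so a miss should not generate an audit record or LSM denial message. A hypothetical example of that calling pattern (not code from this series):

static bool my_can_boost(struct user_namespace *ns, kuid_t target_uid)
{
	/* privileged path: allowed, but lacking the capability is not an error */
	if (ns_capable_noaudit(ns, CAP_SYS_NICE))
		return true;

	/* unprivileged fallback: only allowed on our own processes */
	return uid_eq(current_euid(), target_uid);
}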
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 7002796f14a4..54d15eb2b701 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -173,7 +173,7 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
173 | * | 173 | * |
174 | * Returns valid seccomp BPF response codes. | 174 | * Returns valid seccomp BPF response codes. |
175 | */ | 175 | */ |
176 | static u32 seccomp_run_filters(struct seccomp_data *sd) | 176 | static u32 seccomp_run_filters(const struct seccomp_data *sd) |
177 | { | 177 | { |
178 | struct seccomp_data sd_local; | 178 | struct seccomp_data sd_local; |
179 | u32 ret = SECCOMP_RET_ALLOW; | 179 | u32 ret = SECCOMP_RET_ALLOW; |
@@ -554,20 +554,10 @@ void secure_computing_strict(int this_syscall) | |||
554 | BUG(); | 554 | BUG(); |
555 | } | 555 | } |
556 | #else | 556 | #else |
557 | int __secure_computing(void) | ||
558 | { | ||
559 | u32 phase1_result = seccomp_phase1(NULL); | ||
560 | |||
561 | if (likely(phase1_result == SECCOMP_PHASE1_OK)) | ||
562 | return 0; | ||
563 | else if (likely(phase1_result == SECCOMP_PHASE1_SKIP)) | ||
564 | return -1; | ||
565 | else | ||
566 | return seccomp_phase2(phase1_result); | ||
567 | } | ||
568 | 557 | ||
569 | #ifdef CONFIG_SECCOMP_FILTER | 558 | #ifdef CONFIG_SECCOMP_FILTER |
570 | static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd) | 559 | static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, |
560 | const bool recheck_after_trace) | ||
571 | { | 561 | { |
572 | u32 filter_ret, action; | 562 | u32 filter_ret, action; |
573 | int data; | 563 | int data; |
@@ -599,10 +589,46 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd) | |||
599 | goto skip; | 589 | goto skip; |
600 | 590 | ||
601 | case SECCOMP_RET_TRACE: | 591 | case SECCOMP_RET_TRACE: |
602 | return filter_ret; /* Save the rest for phase 2. */ | 592 | /* We've been put in this state by the ptracer already. */ |
593 | if (recheck_after_trace) | ||
594 | return 0; | ||
595 | |||
596 | /* ENOSYS these calls if there is no tracer attached. */ | ||
597 | if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) { | ||
598 | syscall_set_return_value(current, | ||
599 | task_pt_regs(current), | ||
600 | -ENOSYS, 0); | ||
601 | goto skip; | ||
602 | } | ||
603 | |||
604 | /* Allow the BPF to provide the event message */ | ||
605 | ptrace_event(PTRACE_EVENT_SECCOMP, data); | ||
606 | /* | ||
607 | * The delivery of a fatal signal during event | ||
608 | * notification may silently skip tracer notification. | ||
609 | * Terminating the task now avoids executing a system | ||
610 | * call that may not be intended. | ||
611 | */ | ||
612 | if (fatal_signal_pending(current)) | ||
613 | do_exit(SIGSYS); | ||
614 | /* Check if the tracer forced the syscall to be skipped. */ | ||
615 | this_syscall = syscall_get_nr(current, task_pt_regs(current)); | ||
616 | if (this_syscall < 0) | ||
617 | goto skip; | ||
618 | |||
619 | /* | ||
620 | * Recheck the syscall, since it may have changed. This | ||
621 | * intentionally uses a NULL struct seccomp_data to force | ||
622 | * a reload of all registers. This does not goto skip since | ||
623 | * a skip would have already been reported. | ||
624 | */ | ||
625 | if (__seccomp_filter(this_syscall, NULL, true)) | ||
626 | return -1; | ||
627 | |||
628 | return 0; | ||
603 | 629 | ||
604 | case SECCOMP_RET_ALLOW: | 630 | case SECCOMP_RET_ALLOW: |
605 | return SECCOMP_PHASE1_OK; | 631 | return 0; |
606 | 632 | ||
607 | case SECCOMP_RET_KILL: | 633 | case SECCOMP_RET_KILL: |
608 | default: | 634 | default: |
@@ -614,96 +640,38 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd) | |||
614 | 640 | ||
615 | skip: | 641 | skip: |
616 | audit_seccomp(this_syscall, 0, action); | 642 | audit_seccomp(this_syscall, 0, action); |
617 | return SECCOMP_PHASE1_SKIP; | 643 | return -1; |
644 | } | ||
645 | #else | ||
646 | static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, | ||
647 | const bool recheck_after_trace) | ||
648 | { | ||
649 | BUG(); | ||
618 | } | 650 | } |
619 | #endif | 651 | #endif |
620 | 652 | ||
621 | /** | 653 | int __secure_computing(const struct seccomp_data *sd) |
622 | * seccomp_phase1() - run fast path seccomp checks on the current syscall | ||
623 | * @arg sd: The seccomp_data or NULL | ||
624 | * | ||
625 | * This only reads pt_regs via the syscall_xyz helpers. The only change | ||
626 | * it will make to pt_regs is via syscall_set_return_value, and it will | ||
627 | * only do that if it returns SECCOMP_PHASE1_SKIP. | ||
628 | * | ||
629 | * If sd is provided, it will not read pt_regs at all. | ||
630 | * | ||
631 | * It may also call do_exit or force a signal; these actions must be | ||
632 | * safe. | ||
633 | * | ||
634 | * If it returns SECCOMP_PHASE1_OK, the syscall passes checks and should | ||
635 | * be processed normally. | ||
636 | * | ||
637 | * If it returns SECCOMP_PHASE1_SKIP, then the syscall should not be | ||
638 | * invoked. In this case, seccomp_phase1 will have set the return value | ||
639 | * using syscall_set_return_value. | ||
640 | * | ||
641 | * If it returns anything else, then the return value should be passed | ||
642 | * to seccomp_phase2 from a context in which ptrace hooks are safe. | ||
643 | */ | ||
644 | u32 seccomp_phase1(struct seccomp_data *sd) | ||
645 | { | 654 | { |
646 | int mode = current->seccomp.mode; | 655 | int mode = current->seccomp.mode; |
647 | int this_syscall = sd ? sd->nr : | 656 | int this_syscall; |
648 | syscall_get_nr(current, task_pt_regs(current)); | ||
649 | 657 | ||
650 | if (config_enabled(CONFIG_CHECKPOINT_RESTORE) && | 658 | if (config_enabled(CONFIG_CHECKPOINT_RESTORE) && |
651 | unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) | 659 | unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) |
652 | return SECCOMP_PHASE1_OK; | 660 | return 0; |
661 | |||
662 | this_syscall = sd ? sd->nr : | ||
663 | syscall_get_nr(current, task_pt_regs(current)); | ||
653 | 664 | ||
654 | switch (mode) { | 665 | switch (mode) { |
655 | case SECCOMP_MODE_STRICT: | 666 | case SECCOMP_MODE_STRICT: |
656 | __secure_computing_strict(this_syscall); /* may call do_exit */ | 667 | __secure_computing_strict(this_syscall); /* may call do_exit */ |
657 | return SECCOMP_PHASE1_OK; | 668 | return 0; |
658 | #ifdef CONFIG_SECCOMP_FILTER | ||
659 | case SECCOMP_MODE_FILTER: | 669 | case SECCOMP_MODE_FILTER: |
660 | return __seccomp_phase1_filter(this_syscall, sd); | 670 | return __seccomp_filter(this_syscall, sd, false); |
661 | #endif | ||
662 | default: | 671 | default: |
663 | BUG(); | 672 | BUG(); |
664 | } | 673 | } |
665 | } | 674 | } |
666 | |||
667 | /** | ||
668 | * seccomp_phase2() - finish slow path seccomp work for the current syscall | ||
669 | * @phase1_result: The return value from seccomp_phase1() | ||
670 | * | ||
671 | * This must be called from a context in which ptrace hooks can be used. | ||
672 | * | ||
673 | * Returns 0 if the syscall should be processed or -1 to skip the syscall. | ||
674 | */ | ||
675 | int seccomp_phase2(u32 phase1_result) | ||
676 | { | ||
677 | struct pt_regs *regs = task_pt_regs(current); | ||
678 | u32 action = phase1_result & SECCOMP_RET_ACTION; | ||
679 | int data = phase1_result & SECCOMP_RET_DATA; | ||
680 | |||
681 | BUG_ON(action != SECCOMP_RET_TRACE); | ||
682 | |||
683 | audit_seccomp(syscall_get_nr(current, regs), 0, action); | ||
684 | |||
685 | /* Skip these calls if there is no tracer. */ | ||
686 | if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) { | ||
687 | syscall_set_return_value(current, regs, | ||
688 | -ENOSYS, 0); | ||
689 | return -1; | ||
690 | } | ||
691 | |||
692 | /* Allow the BPF to provide the event message */ | ||
693 | ptrace_event(PTRACE_EVENT_SECCOMP, data); | ||
694 | /* | ||
695 | * The delivery of a fatal signal during event | ||
696 | * notification may silently skip tracer notification. | ||
697 | * Terminating the task now avoids executing a system | ||
698 | * call that may not be intended. | ||
699 | */ | ||
700 | if (fatal_signal_pending(current)) | ||
701 | do_exit(SIGSYS); | ||
702 | if (syscall_get_nr(current, regs) < 0) | ||
703 | return -1; /* Explicit request to skip. */ | ||
704 | |||
705 | return 0; | ||
706 | } | ||
707 | #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */ | 675 | #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */ |
708 | 676 | ||
709 | long prctl_get_seccomp(void) | 677 | long prctl_get_seccomp(void) |
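The SECCOMP_RET_TRACE handling above now re-runs the filters after the tracer has had its say (the recheck_after_trace pass), closing the hole where a ptracer could rewrite the syscall number past an earlier filter decision. For context, a minimal userspace filter that routes one syscall to an attached tracer looks roughly like this (illustrative, not from the kernel tree):

#include <stddef.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

static int install_trace_filter(void)
{
	struct sock_filter filter[] = {
		/* load the syscall number */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* hand openat to the tracer, allow everything else */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_openat, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRACE),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}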
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3ff137d9471d..3828f94b234c 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -216,14 +216,17 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req | |||
216 | skb = dccp_make_response(sk, dst, req); | 216 | skb = dccp_make_response(sk, dst, req); |
217 | if (skb != NULL) { | 217 | if (skb != NULL) { |
218 | struct dccp_hdr *dh = dccp_hdr(skb); | 218 | struct dccp_hdr *dh = dccp_hdr(skb); |
219 | struct ipv6_txoptions *opt; | ||
219 | 220 | ||
220 | dh->dccph_checksum = dccp_v6_csum_finish(skb, | 221 | dh->dccph_checksum = dccp_v6_csum_finish(skb, |
221 | &ireq->ir_v6_loc_addr, | 222 | &ireq->ir_v6_loc_addr, |
222 | &ireq->ir_v6_rmt_addr); | 223 | &ireq->ir_v6_rmt_addr); |
223 | fl6.daddr = ireq->ir_v6_rmt_addr; | 224 | fl6.daddr = ireq->ir_v6_rmt_addr; |
224 | rcu_read_lock(); | 225 | rcu_read_lock(); |
225 | err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), | 226 | opt = ireq->ipv6_opt; |
226 | np->tclass); | 227 | if (!opt) |
228 | opt = rcu_dereference(np->opt); | ||
229 | err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); | ||
227 | rcu_read_unlock(); | 230 | rcu_read_unlock(); |
228 | err = net_xmit_eval(err); | 231 | err = net_xmit_eval(err); |
229 | } | 232 | } |
@@ -236,6 +239,7 @@ done: | |||
236 | static void dccp_v6_reqsk_destructor(struct request_sock *req) | 239 | static void dccp_v6_reqsk_destructor(struct request_sock *req) |
237 | { | 240 | { |
238 | dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); | 241 | dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); |
242 | kfree(inet_rsk(req)->ipv6_opt); | ||
239 | kfree_skb(inet_rsk(req)->pktopts); | 243 | kfree_skb(inet_rsk(req)->pktopts); |
240 | } | 244 | } |
241 | 245 | ||
@@ -494,7 +498,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
494 | * Yes, keeping reference count would be much more clever, but we make | 498 | * Yes, keeping reference count would be much more clever, but we make |
495 | * one more one thing there: reattach optmem to newsk. | 499 | * one more one thing there: reattach optmem to newsk. |
496 | */ | 500 | */ |
497 | opt = rcu_dereference(np->opt); | 501 | opt = ireq->ipv6_opt; |
502 | if (!opt) | ||
503 | opt = rcu_dereference(np->opt); | ||
498 | if (opt) { | 504 | if (opt) { |
499 | opt = ipv6_dup_options(newsk, opt); | 505 | opt = ipv6_dup_options(newsk, opt); |
500 | RCU_INIT_POINTER(newnp->opt, opt); | 506 | RCU_INIT_POINTER(newnp->opt, opt); |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 40d6b87713a1..72d6f056d863 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -135,76 +135,6 @@ int cipso_v4_rbm_strictvalid = 1; | |||
135 | */ | 135 | */ |
136 | 136 | ||
137 | /** | 137 | /** |
138 | * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit | ||
139 | * @bitmap: the bitmap | ||
140 | * @bitmap_len: length in bits | ||
141 | * @offset: starting offset | ||
142 | * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit | ||
143 | * | ||
144 | * Description: | ||
145 | * Starting at @offset, walk the bitmap from left to right until either the | ||
146 | * desired bit is found or we reach the end. Return the bit offset, -1 if | ||
147 | * not found, or -2 if error. | ||
148 | */ | ||
149 | static int cipso_v4_bitmap_walk(const unsigned char *bitmap, | ||
150 | u32 bitmap_len, | ||
151 | u32 offset, | ||
152 | u8 state) | ||
153 | { | ||
154 | u32 bit_spot; | ||
155 | u32 byte_offset; | ||
156 | unsigned char bitmask; | ||
157 | unsigned char byte; | ||
158 | |||
159 | /* gcc always rounds to zero when doing integer division */ | ||
160 | byte_offset = offset / 8; | ||
161 | byte = bitmap[byte_offset]; | ||
162 | bit_spot = offset; | ||
163 | bitmask = 0x80 >> (offset % 8); | ||
164 | |||
165 | while (bit_spot < bitmap_len) { | ||
166 | if ((state && (byte & bitmask) == bitmask) || | ||
167 | (state == 0 && (byte & bitmask) == 0)) | ||
168 | return bit_spot; | ||
169 | |||
170 | bit_spot++; | ||
171 | bitmask >>= 1; | ||
172 | if (bitmask == 0) { | ||
173 | byte = bitmap[++byte_offset]; | ||
174 | bitmask = 0x80; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | return -1; | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap | ||
183 | * @bitmap: the bitmap | ||
184 | * @bit: the bit | ||
185 | * @state: if non-zero, set the bit (1) else clear the bit (0) | ||
186 | * | ||
187 | * Description: | ||
188 | * Set a single bit in the bitmask. Returns zero on success, negative values | ||
189 | * on error. | ||
190 | */ | ||
191 | static void cipso_v4_bitmap_setbit(unsigned char *bitmap, | ||
192 | u32 bit, | ||
193 | u8 state) | ||
194 | { | ||
195 | u32 byte_spot; | ||
196 | u8 bitmask; | ||
197 | |||
198 | /* gcc always rounds to zero when doing integer division */ | ||
199 | byte_spot = bit / 8; | ||
200 | bitmask = 0x80 >> (bit % 8); | ||
201 | if (state) | ||
202 | bitmap[byte_spot] |= bitmask; | ||
203 | else | ||
204 | bitmap[byte_spot] &= ~bitmask; | ||
205 | } | ||
206 | |||
207 | /** | ||
208 | * cipso_v4_cache_entry_free - Frees a cache entry | 138 | * cipso_v4_cache_entry_free - Frees a cache entry |
209 | * @entry: the entry to free | 139 | * @entry: the entry to free |
210 | * | 140 | * |
@@ -840,10 +770,10 @@ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, | |||
840 | cipso_cat_size = doi_def->map.std->cat.cipso_size; | 770 | cipso_cat_size = doi_def->map.std->cat.cipso_size; |
841 | cipso_array = doi_def->map.std->cat.cipso; | 771 | cipso_array = doi_def->map.std->cat.cipso; |
842 | for (;;) { | 772 | for (;;) { |
843 | cat = cipso_v4_bitmap_walk(bitmap, | 773 | cat = netlbl_bitmap_walk(bitmap, |
844 | bitmap_len_bits, | 774 | bitmap_len_bits, |
845 | cat + 1, | 775 | cat + 1, |
846 | 1); | 776 | 1); |
847 | if (cat < 0) | 777 | if (cat < 0) |
848 | break; | 778 | break; |
849 | if (cat >= cipso_cat_size || | 779 | if (cat >= cipso_cat_size || |
@@ -909,7 +839,7 @@ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, | |||
909 | } | 839 | } |
910 | if (net_spot >= net_clen_bits) | 840 | if (net_spot >= net_clen_bits) |
911 | return -ENOSPC; | 841 | return -ENOSPC; |
912 | cipso_v4_bitmap_setbit(net_cat, net_spot, 1); | 842 | netlbl_bitmap_setbit(net_cat, net_spot, 1); |
913 | 843 | ||
914 | if (net_spot > net_spot_max) | 844 | if (net_spot > net_spot_max) |
915 | net_spot_max = net_spot; | 845 | net_spot_max = net_spot; |
@@ -951,10 +881,10 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, | |||
951 | } | 881 | } |
952 | 882 | ||
953 | for (;;) { | 883 | for (;;) { |
954 | net_spot = cipso_v4_bitmap_walk(net_cat, | 884 | net_spot = netlbl_bitmap_walk(net_cat, |
955 | net_clen_bits, | 885 | net_clen_bits, |
956 | net_spot + 1, | 886 | net_spot + 1, |
957 | 1); | 887 | 1); |
958 | if (net_spot < 0) { | 888 | if (net_spot < 0) { |
959 | if (net_spot == -2) | 889 | if (net_spot == -2) |
960 | return -EFAULT; | 890 | return -EFAULT; |
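The bitmap helpers lose their cipso_v4_ prefix and move into the NetLabel core (declared earlier in netlabel.h) so the CALIPSO code can reuse the same wire-format bitmap walking. The calling convention is unchanged; a small usage sketch:

static void my_dump_categories(const unsigned char *bitmap, u32 len_bits)
{
	int spot = -1;

	for (;;) {
		/* find the next set bit; returns a negative value when none remain */
		spot = netlbl_bitmap_walk(bitmap, len_bits, spot + 1, 1);
		if (spot < 0)
			break;
		pr_info("category %d is set\n", spot);
	}
}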
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f9f9e375d7de..3ebf45b38bc3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -6147,6 +6147,9 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, | |||
6147 | 6147 | ||
6148 | kmemcheck_annotate_bitfield(ireq, flags); | 6148 | kmemcheck_annotate_bitfield(ireq, flags); |
6149 | ireq->opt = NULL; | 6149 | ireq->opt = NULL; |
6150 | #if IS_ENABLED(CONFIG_IPV6) | ||
6151 | ireq->pktopts = NULL; | ||
6152 | #endif | ||
6150 | atomic64_set(&ireq->ir_cookie, 0); | 6153 | atomic64_set(&ireq->ir_cookie, 0); |
6151 | ireq->ireq_state = TCP_NEW_SYN_RECV; | 6154 | ireq->ireq_state = TCP_NEW_SYN_RECV; |
6152 | write_pnet(&ireq->ireq_net, sock_net(sk_listener)); | 6155 | write_pnet(&ireq->ireq_net, sock_net(sk_listener)); |
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 6d8ea099213e..c174ccb340a1 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile | |||
@@ -22,6 +22,7 @@ ipv6-$(CONFIG_NETFILTER) += netfilter.o | |||
22 | ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o | 22 | ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o |
23 | ipv6-$(CONFIG_PROC_FS) += proc.o | 23 | ipv6-$(CONFIG_PROC_FS) += proc.o |
24 | ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o | 24 | ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o |
25 | ipv6-$(CONFIG_NETLABEL) += calipso.o | ||
25 | 26 | ||
26 | ipv6-objs += $(ipv6-y) | 27 | ipv6-objs += $(ipv6-y) |
27 | 28 | ||
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2076c21107d0..b454055ba625 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #ifdef CONFIG_IPV6_TUNNEL | 60 | #ifdef CONFIG_IPV6_TUNNEL |
61 | #include <net/ip6_tunnel.h> | 61 | #include <net/ip6_tunnel.h> |
62 | #endif | 62 | #endif |
63 | #include <net/calipso.h> | ||
63 | 64 | ||
64 | #include <asm/uaccess.h> | 65 | #include <asm/uaccess.h> |
65 | #include <linux/mroute6.h> | 66 | #include <linux/mroute6.h> |
@@ -983,6 +984,10 @@ static int __init inet6_init(void) | |||
983 | if (err) | 984 | if (err) |
984 | goto pingv6_fail; | 985 | goto pingv6_fail; |
985 | 986 | ||
987 | err = calipso_init(); | ||
988 | if (err) | ||
989 | goto calipso_fail; | ||
990 | |||
986 | #ifdef CONFIG_SYSCTL | 991 | #ifdef CONFIG_SYSCTL |
987 | err = ipv6_sysctl_register(); | 992 | err = ipv6_sysctl_register(); |
988 | if (err) | 993 | if (err) |
@@ -993,8 +998,10 @@ out: | |||
993 | 998 | ||
994 | #ifdef CONFIG_SYSCTL | 999 | #ifdef CONFIG_SYSCTL |
995 | sysctl_fail: | 1000 | sysctl_fail: |
996 | pingv6_exit(); | 1001 | calipso_exit(); |
997 | #endif | 1002 | #endif |
1003 | calipso_fail: | ||
1004 | pingv6_exit(); | ||
998 | pingv6_fail: | 1005 | pingv6_fail: |
999 | ipv6_packet_cleanup(); | 1006 | ipv6_packet_cleanup(); |
1000 | ipv6_packet_fail: | 1007 | ipv6_packet_fail: |
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c new file mode 100644 index 000000000000..c53b92c617c5 --- /dev/null +++ b/net/ipv6/calipso.c | |||
@@ -0,0 +1,1473 @@ | |||
1 | /* | ||
2 | * CALIPSO - Common Architecture Label IPv6 Security Option | ||
3 | * | ||
4 | * This is an implementation of the CALIPSO protocol as specified in | ||
5 | * RFC 5570. | ||
6 | * | ||
7 | * Authors: Paul Moore <paul.moore@hp.com> | ||
8 | * Huw Davies <huw@codeweavers.com> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | /* (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 | ||
13 | * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015 | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | * This program is distributed in the hope that it will be useful, | ||
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
23 | * the GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #include <linux/init.h> | ||
31 | #include <linux/types.h> | ||
32 | #include <linux/rcupdate.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/string.h> | ||
36 | #include <linux/jhash.h> | ||
37 | #include <linux/audit.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <net/ip.h> | ||
40 | #include <net/icmp.h> | ||
41 | #include <net/tcp.h> | ||
42 | #include <net/netlabel.h> | ||
43 | #include <net/calipso.h> | ||
44 | #include <linux/atomic.h> | ||
45 | #include <linux/bug.h> | ||
46 | #include <asm/unaligned.h> | ||
47 | #include <linux/crc-ccitt.h> | ||
48 | |||
49 | /* Maximum size of the calipso option including | ||
50 | * the two-byte TLV header. | ||
51 | */ | ||
52 | #define CALIPSO_OPT_LEN_MAX (2 + 252) | ||
53 | |||
54 | /* Size of the minimum calipso option including | ||
55 | * the two-byte TLV header. | ||
56 | */ | ||
57 | #define CALIPSO_HDR_LEN (2 + 8) | ||
58 | |||
59 | /* Maximum size of the calipso option including | ||
60 | * the two-byte TLV header and up to 3 bytes of | ||
61 | * leading pad and 7 bytes of trailing pad. | ||
62 | */ | ||
63 | #define CALIPSO_OPT_LEN_MAX_WITH_PAD (3 + CALIPSO_OPT_LEN_MAX + 7) | ||
64 | |||
65 | /* Maximum size of u32 aligned buffer required to hold calipso | ||
66 | * option. Max of 3 initial pad bytes starting from buffer + 3. | ||
67 | * i.e. the worst case is when the previous tlv finishes on 4n + 3. | ||
68 | */ | ||
69 | #define CALIPSO_MAX_BUFFER (6 + CALIPSO_OPT_LEN_MAX) | ||
70 | |||
71 | /* List of available DOI definitions */ | ||
72 | static DEFINE_SPINLOCK(calipso_doi_list_lock); | ||
73 | static LIST_HEAD(calipso_doi_list); | ||
74 | |||
75 | /* Label mapping cache */ | ||
76 | int calipso_cache_enabled = 1; | ||
77 | int calipso_cache_bucketsize = 10; | ||
78 | #define CALIPSO_CACHE_BUCKETBITS 7 | ||
79 | #define CALIPSO_CACHE_BUCKETS BIT(CALIPSO_CACHE_BUCKETBITS) | ||
80 | #define CALIPSO_CACHE_REORDERLIMIT 10 | ||
81 | struct calipso_map_cache_bkt { | ||
82 | spinlock_t lock; | ||
83 | u32 size; | ||
84 | struct list_head list; | ||
85 | }; | ||
86 | |||
87 | struct calipso_map_cache_entry { | ||
88 | u32 hash; | ||
89 | unsigned char *key; | ||
90 | size_t key_len; | ||
91 | |||
92 | struct netlbl_lsm_cache *lsm_data; | ||
93 | |||
94 | u32 activity; | ||
95 | struct list_head list; | ||
96 | }; | ||
97 | |||
98 | static struct calipso_map_cache_bkt *calipso_cache; | ||
99 | |||
100 | /* Label Mapping Cache Functions | ||
101 | */ | ||
102 | |||
103 | /** | ||
104 | * calipso_cache_entry_free - Frees a cache entry | ||
105 | * @entry: the entry to free | ||
106 | * | ||
107 | * Description: | ||
108 | * This function frees the memory associated with a cache entry including the | ||
109 | * LSM cache data if there are no longer any users, i.e. reference count == 0. | ||
110 | * | ||
111 | */ | ||
112 | static void calipso_cache_entry_free(struct calipso_map_cache_entry *entry) | ||
113 | { | ||
114 | if (entry->lsm_data) | ||
115 | netlbl_secattr_cache_free(entry->lsm_data); | ||
116 | kfree(entry->key); | ||
117 | kfree(entry); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * calipso_map_cache_hash - Hashing function for the CALIPSO cache | ||
122 | * @key: the hash key | ||
123 | * @key_len: the length of the key in bytes | ||
124 | * | ||
125 | * Description: | ||
126 | * The CALIPSO tag hashing function. Returns a 32-bit hash value. | ||
127 | * | ||
128 | */ | ||
129 | static u32 calipso_map_cache_hash(const unsigned char *key, u32 key_len) | ||
130 | { | ||
131 | return jhash(key, key_len, 0); | ||
132 | } | ||
133 | |||
134 | /** | ||
135 | * calipso_cache_init - Initialize the CALIPSO cache | ||
136 | * | ||
137 | * Description: | ||
138 | * Initializes the CALIPSO label mapping cache, this function should be called | ||
139 | * before any of the other functions defined in this file. Returns zero on | ||
140 | * success, negative values on error. | ||
141 | * | ||
142 | */ | ||
143 | static int __init calipso_cache_init(void) | ||
144 | { | ||
145 | u32 iter; | ||
146 | |||
147 | calipso_cache = kcalloc(CALIPSO_CACHE_BUCKETS, | ||
148 | sizeof(struct calipso_map_cache_bkt), | ||
149 | GFP_KERNEL); | ||
150 | if (!calipso_cache) | ||
151 | return -ENOMEM; | ||
152 | |||
153 | for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) { | ||
154 | spin_lock_init(&calipso_cache[iter].lock); | ||
155 | calipso_cache[iter].size = 0; | ||
156 | INIT_LIST_HEAD(&calipso_cache[iter].list); | ||
157 | } | ||
158 | |||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * calipso_cache_invalidate - Invalidates the current CALIPSO cache | ||
164 | * | ||
165 | * Description: | ||
166 | * Invalidates and frees any entries in the CALIPSO cache. Returns zero on | ||
167 | * success and negative values on failure. | ||
168 | * | ||
169 | */ | ||
170 | static void calipso_cache_invalidate(void) | ||
171 | { | ||
172 | struct calipso_map_cache_entry *entry, *tmp_entry; | ||
173 | u32 iter; | ||
174 | |||
175 | for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) { | ||
176 | spin_lock_bh(&calipso_cache[iter].lock); | ||
177 | list_for_each_entry_safe(entry, | ||
178 | tmp_entry, | ||
179 | &calipso_cache[iter].list, list) { | ||
180 | list_del(&entry->list); | ||
181 | calipso_cache_entry_free(entry); | ||
182 | } | ||
183 | calipso_cache[iter].size = 0; | ||
184 | spin_unlock_bh(&calipso_cache[iter].lock); | ||
185 | } | ||
186 | } | ||
187 | |||
188 | /** | ||
189 | * calipso_cache_check - Check the CALIPSO cache for a label mapping | ||
190 | * @key: the buffer to check | ||
191 | * @key_len: buffer length in bytes | ||
192 | * @secattr: the security attribute struct to use | ||
193 | * | ||
194 | * Description: | ||
195 | * This function checks the cache to see if a label mapping already exists for | ||
196 | * the given key. If there is a match then the cache is adjusted and the | ||
197 | * @secattr struct is populated with the correct LSM security attributes. The | ||
198 | * cache is adjusted in the following manner if the entry is not already the | ||
199 | * first in the cache bucket: | ||
200 | * | ||
201 | * 1. The cache entry's activity counter is incremented | ||
202 | * 2. The previous (higher ranking) entry's activity counter is decremented | ||
203 | * 3. If the difference between the two activity counters is greater than | ||
204 | * CALIPSO_CACHE_REORDERLIMIT the two entries are swapped | ||
205 | * | ||
206 | * Returns zero on success, -ENOENT for a cache miss, and other negative values | ||
207 | * on error. | ||
208 | * | ||
209 | */ | ||
210 | static int calipso_cache_check(const unsigned char *key, | ||
211 | u32 key_len, | ||
212 | struct netlbl_lsm_secattr *secattr) | ||
213 | { | ||
214 | u32 bkt; | ||
215 | struct calipso_map_cache_entry *entry; | ||
216 | struct calipso_map_cache_entry *prev_entry = NULL; | ||
217 | u32 hash; | ||
218 | |||
219 | if (!calipso_cache_enabled) | ||
220 | return -ENOENT; | ||
221 | |||
222 | hash = calipso_map_cache_hash(key, key_len); | ||
223 | bkt = hash & (CALIPSO_CACHE_BUCKETS - 1); | ||
224 | spin_lock_bh(&calipso_cache[bkt].lock); | ||
225 | list_for_each_entry(entry, &calipso_cache[bkt].list, list) { | ||
226 | if (entry->hash == hash && | ||
227 | entry->key_len == key_len && | ||
228 | memcmp(entry->key, key, key_len) == 0) { | ||
229 | entry->activity += 1; | ||
230 | atomic_inc(&entry->lsm_data->refcount); | ||
231 | secattr->cache = entry->lsm_data; | ||
232 | secattr->flags |= NETLBL_SECATTR_CACHE; | ||
233 | secattr->type = NETLBL_NLTYPE_CALIPSO; | ||
234 | if (!prev_entry) { | ||
235 | spin_unlock_bh(&calipso_cache[bkt].lock); | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | if (prev_entry->activity > 0) | ||
240 | prev_entry->activity -= 1; | ||
241 | if (entry->activity > prev_entry->activity && | ||
242 | entry->activity - prev_entry->activity > | ||
243 | CALIPSO_CACHE_REORDERLIMIT) { | ||
244 | __list_del(entry->list.prev, entry->list.next); | ||
245 | __list_add(&entry->list, | ||
246 | prev_entry->list.prev, | ||
247 | &prev_entry->list); | ||
248 | } | ||
249 | |||
250 | spin_unlock_bh(&calipso_cache[bkt].lock); | ||
251 | return 0; | ||
252 | } | ||
253 | prev_entry = entry; | ||
254 | } | ||
255 | spin_unlock_bh(&calipso_cache[bkt].lock); | ||
256 | |||
257 | return -ENOENT; | ||
258 | } | ||
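Illustrative aside, not part of the patch: the reorder rule documented above only swaps an entry with its predecessor once their activity counters drift far enough apart. A stand-alone sketch of that condition; REORDERLIMIT and should_swap() are hypothetical stand-ins for CALIPSO_CACHE_REORDERLIMIT and the in-place test in calipso_cache_check():

#include <stdbool.h>
#include <stdio.h>

#define REORDERLIMIT 10                 /* stand-in for CALIPSO_CACHE_REORDERLIMIT */

/* Mirrors the swap test: only promote when the gap exceeds the limit. */
static bool should_swap(unsigned int entry_activity, unsigned int prev_activity)
{
        return entry_activity > prev_activity &&
               entry_activity - prev_activity > REORDERLIMIT;
}

int main(void)
{
        printf("%d\n", should_swap(25, 10));    /* 1: gap of 15 exceeds the limit */
        printf("%d\n", should_swap(15, 10));    /* 0: gap of 5 keeps the ordering */
        return 0;
}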
259 | |||
260 | /** | ||
261 | * calipso_cache_add - Add an entry to the CALIPSO cache | ||
262 | * @calipso_ptr: the CALIPSO option | ||
263 | * @secattr: the packet's security attributes | ||
264 | * | ||
265 | * Description: | ||
266 | * Add a new entry into the CALIPSO label mapping cache. The new entry is | ||
267 | * added to the head of the cache bucket's list; if the cache bucket is out | ||
268 | * of room the last entry in the list is removed first. It is important to | ||
269 | * note that there is currently no checking for duplicate keys. Returns zero | ||
270 | * on success, negative values on failure. The key stored starts at | ||
271 | * calipso_ptr + 2, i.e. the type and length bytes are not stored; this | ||
272 | * corresponds to calipso_ptr[1] bytes of data. | ||
273 | * | ||
274 | */ | ||
275 | static int calipso_cache_add(const unsigned char *calipso_ptr, | ||
276 | const struct netlbl_lsm_secattr *secattr) | ||
277 | { | ||
278 | int ret_val = -EPERM; | ||
279 | u32 bkt; | ||
280 | struct calipso_map_cache_entry *entry = NULL; | ||
281 | struct calipso_map_cache_entry *old_entry = NULL; | ||
282 | u32 calipso_ptr_len; | ||
283 | |||
284 | if (!calipso_cache_enabled || calipso_cache_bucketsize <= 0) | ||
285 | return 0; | ||
286 | |||
287 | calipso_ptr_len = calipso_ptr[1]; | ||
288 | |||
289 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | ||
290 | if (!entry) | ||
291 | return -ENOMEM; | ||
292 | entry->key = kmemdup(calipso_ptr + 2, calipso_ptr_len, GFP_ATOMIC); | ||
293 | if (!entry->key) { | ||
294 | ret_val = -ENOMEM; | ||
295 | goto cache_add_failure; | ||
296 | } | ||
297 | entry->key_len = calipso_ptr_len; | ||
298 | entry->hash = calipso_map_cache_hash(calipso_ptr + 2, calipso_ptr_len); | ||
299 | atomic_inc(&secattr->cache->refcount); | ||
300 | entry->lsm_data = secattr->cache; | ||
301 | |||
302 | bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1); | ||
303 | spin_lock_bh(&calipso_cache[bkt].lock); | ||
304 | if (calipso_cache[bkt].size < calipso_cache_bucketsize) { | ||
305 | list_add(&entry->list, &calipso_cache[bkt].list); | ||
306 | calipso_cache[bkt].size += 1; | ||
307 | } else { | ||
308 | old_entry = list_entry(calipso_cache[bkt].list.prev, | ||
309 | struct calipso_map_cache_entry, list); | ||
310 | list_del(&old_entry->list); | ||
311 | list_add(&entry->list, &calipso_cache[bkt].list); | ||
312 | calipso_cache_entry_free(old_entry); | ||
313 | } | ||
314 | spin_unlock_bh(&calipso_cache[bkt].lock); | ||
315 | |||
316 | return 0; | ||
317 | |||
318 | cache_add_failure: | ||
319 | if (entry) | ||
320 | calipso_cache_entry_free(entry); | ||
321 | return ret_val; | ||
322 | } | ||
323 | |||
324 | /* DOI List Functions | ||
325 | */ | ||
326 | |||
327 | /** | ||
328 | * calipso_doi_search - Searches for a DOI definition | ||
329 | * @doi: the DOI to search for | ||
330 | * | ||
331 | * Description: | ||
332 | * Search the DOI definition list for a DOI definition with a DOI value that | ||
333 | * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). | ||
334 | * Returns a pointer to the DOI definition on success and NULL on failure. | ||
335 | */ | ||
336 | static struct calipso_doi *calipso_doi_search(u32 doi) | ||
337 | { | ||
338 | struct calipso_doi *iter; | ||
339 | |||
340 | list_for_each_entry_rcu(iter, &calipso_doi_list, list) | ||
341 | if (iter->doi == doi && atomic_read(&iter->refcount)) | ||
342 | return iter; | ||
343 | return NULL; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine | ||
348 | * @doi_def: the DOI structure | ||
349 | * @audit_info: NetLabel audit information | ||
350 | * | ||
351 | * Description: | ||
352 | * The caller defines a new DOI for use by the CALIPSO engine and calls this | ||
353 | * function to add it to the list of acceptable domains. The caller must | ||
354 | * ensure that @doi_def->type is set to a valid CALIPSO mapping type (see | ||
355 | * calipso.h for details) before calling this function. Returns | ||
356 | * zero on success and non-zero on failure. | ||
357 | * | ||
358 | */ | ||
359 | static int calipso_doi_add(struct calipso_doi *doi_def, | ||
360 | struct netlbl_audit *audit_info) | ||
361 | { | ||
362 | int ret_val = -EINVAL; | ||
363 | u32 doi; | ||
364 | u32 doi_type; | ||
365 | struct audit_buffer *audit_buf; | ||
366 | |||
367 | doi = doi_def->doi; | ||
368 | doi_type = doi_def->type; | ||
369 | |||
370 | if (doi_def->doi == CALIPSO_DOI_UNKNOWN) | ||
371 | goto doi_add_return; | ||
372 | |||
373 | atomic_set(&doi_def->refcount, 1); | ||
374 | |||
375 | spin_lock(&calipso_doi_list_lock); | ||
376 | if (calipso_doi_search(doi_def->doi)) { | ||
377 | spin_unlock(&calipso_doi_list_lock); | ||
378 | ret_val = -EEXIST; | ||
379 | goto doi_add_return; | ||
380 | } | ||
381 | list_add_tail_rcu(&doi_def->list, &calipso_doi_list); | ||
382 | spin_unlock(&calipso_doi_list_lock); | ||
383 | ret_val = 0; | ||
384 | |||
385 | doi_add_return: | ||
386 | audit_buf = netlbl_audit_start(AUDIT_MAC_CALIPSO_ADD, audit_info); | ||
387 | if (audit_buf) { | ||
388 | const char *type_str; | ||
389 | |||
390 | switch (doi_type) { | ||
391 | case CALIPSO_MAP_PASS: | ||
392 | type_str = "pass"; | ||
393 | break; | ||
394 | default: | ||
395 | type_str = "(unknown)"; | ||
396 | } | ||
397 | audit_log_format(audit_buf, | ||
398 | " calipso_doi=%u calipso_type=%s res=%u", | ||
399 | doi, type_str, ret_val == 0 ? 1 : 0); | ||
400 | audit_log_end(audit_buf); | ||
401 | } | ||
402 | |||
403 | return ret_val; | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * calipso_doi_free - Frees a DOI definition | ||
408 | * @doi_def: the DOI definition | ||
409 | * | ||
410 | * Description: | ||
411 | * This function frees all of the memory associated with a DOI definition. | ||
412 | * | ||
413 | */ | ||
414 | static void calipso_doi_free(struct calipso_doi *doi_def) | ||
415 | { | ||
416 | kfree(doi_def); | ||
417 | } | ||
418 | |||
419 | /** | ||
420 | * calipso_doi_free_rcu - Frees a DOI definition via the RCU pointer | ||
421 | * @entry: the entry's RCU field | ||
422 | * | ||
423 | * Description: | ||
424 | * This function is designed to be used as a callback to the call_rcu() | ||
425 | * function so that the memory allocated to the DOI definition can be released | ||
426 | * safely. | ||
427 | * | ||
428 | */ | ||
429 | static void calipso_doi_free_rcu(struct rcu_head *entry) | ||
430 | { | ||
431 | struct calipso_doi *doi_def; | ||
432 | |||
433 | doi_def = container_of(entry, struct calipso_doi, rcu); | ||
434 | calipso_doi_free(doi_def); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * calipso_doi_remove - Remove an existing DOI from the CALIPSO protocol engine | ||
439 | * @doi: the DOI value | ||
441 | * @audit_info: NetLabel audit information | ||
441 | * | ||
442 | * Description: | ||
443 | * Removes a DOI definition from the CALIPSO engine. The NetLabel routines will | ||
444 | * be called to release their own LSM domain mappings as well as our own | ||
445 | * domain list. Returns zero on success and negative values on failure. | ||
446 | * | ||
447 | */ | ||
448 | static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info) | ||
449 | { | ||
450 | int ret_val; | ||
451 | struct calipso_doi *doi_def; | ||
452 | struct audit_buffer *audit_buf; | ||
453 | |||
454 | spin_lock(&calipso_doi_list_lock); | ||
455 | doi_def = calipso_doi_search(doi); | ||
456 | if (!doi_def) { | ||
457 | spin_unlock(&calipso_doi_list_lock); | ||
458 | ret_val = -ENOENT; | ||
459 | goto doi_remove_return; | ||
460 | } | ||
461 | if (!atomic_dec_and_test(&doi_def->refcount)) { | ||
462 | spin_unlock(&calipso_doi_list_lock); | ||
463 | ret_val = -EBUSY; | ||
464 | goto doi_remove_return; | ||
465 | } | ||
466 | list_del_rcu(&doi_def->list); | ||
467 | spin_unlock(&calipso_doi_list_lock); | ||
468 | |||
469 | call_rcu(&doi_def->rcu, calipso_doi_free_rcu); | ||
470 | ret_val = 0; | ||
471 | |||
472 | doi_remove_return: | ||
473 | audit_buf = netlbl_audit_start(AUDIT_MAC_CALIPSO_DEL, audit_info); | ||
474 | if (audit_buf) { | ||
475 | audit_log_format(audit_buf, | ||
476 | " calipso_doi=%u res=%u", | ||
477 | doi, ret_val == 0 ? 1 : 0); | ||
478 | audit_log_end(audit_buf); | ||
479 | } | ||
480 | |||
481 | return ret_val; | ||
482 | } | ||
483 | |||
484 | /** | ||
485 | * calipso_doi_getdef - Returns a reference to a valid DOI definition | ||
486 | * @doi: the DOI value | ||
487 | * | ||
488 | * Description: | ||
489 | * Searches for a valid DOI definition and if one is found it is returned to | ||
490 | * the caller. Otherwise NULL is returned. The caller must ensure that | ||
491 | * calipso_doi_putdef() is called when the caller is done. | ||
492 | * | ||
493 | */ | ||
494 | static struct calipso_doi *calipso_doi_getdef(u32 doi) | ||
495 | { | ||
496 | struct calipso_doi *doi_def; | ||
497 | |||
498 | rcu_read_lock(); | ||
499 | doi_def = calipso_doi_search(doi); | ||
500 | if (!doi_def) | ||
501 | goto doi_getdef_return; | ||
502 | if (!atomic_inc_not_zero(&doi_def->refcount)) | ||
503 | doi_def = NULL; | ||
504 | |||
505 | doi_getdef_return: | ||
506 | rcu_read_unlock(); | ||
507 | return doi_def; | ||
508 | } | ||
509 | |||
510 | /** | ||
511 | * calipso_doi_putdef - Releases a reference for the given DOI definition | ||
512 | * @doi_def: the DOI definition | ||
513 | * | ||
514 | * Description: | ||
515 | * Releases a DOI definition reference obtained from calipso_doi_getdef(). | ||
516 | * | ||
517 | */ | ||
518 | static void calipso_doi_putdef(struct calipso_doi *doi_def) | ||
519 | { | ||
520 | if (!doi_def) | ||
521 | return; | ||
522 | |||
523 | if (!atomic_dec_and_test(&doi_def->refcount)) | ||
524 | return; | ||
525 | spin_lock(&calipso_doi_list_lock); | ||
526 | list_del_rcu(&doi_def->list); | ||
527 | spin_unlock(&calipso_doi_list_lock); | ||
528 | |||
529 | call_rcu(&doi_def->rcu, calipso_doi_free_rcu); | ||
530 | } | ||
531 | |||
532 | /** | ||
533 | * calipso_doi_walk - Iterate through the DOI definitions | ||
534 | * @skip_cnt: skip past this number of DOI definitions, updated | ||
535 | * @callback: callback for each DOI definition | ||
536 | * @cb_arg: argument for the callback function | ||
537 | * | ||
538 | * Description: | ||
539 | * Iterate over the DOI definition list, skipping the first @skip_cnt entries. | ||
540 | * For each entry call @callback, if @callback returns a negative value stop | ||
541 | * 'walking' through the list and return. Updates the value in @skip_cnt upon | ||
542 | * return. Returns zero on success, negative values on failure. | ||
543 | * | ||
544 | */ | ||
545 | static int calipso_doi_walk(u32 *skip_cnt, | ||
546 | int (*callback)(struct calipso_doi *doi_def, | ||
547 | void *arg), | ||
548 | void *cb_arg) | ||
549 | { | ||
550 | int ret_val = -ENOENT; | ||
551 | u32 doi_cnt = 0; | ||
552 | struct calipso_doi *iter_doi; | ||
553 | |||
554 | rcu_read_lock(); | ||
555 | list_for_each_entry_rcu(iter_doi, &calipso_doi_list, list) | ||
556 | if (atomic_read(&iter_doi->refcount) > 0) { | ||
557 | if (doi_cnt++ < *skip_cnt) | ||
558 | continue; | ||
559 | ret_val = callback(iter_doi, cb_arg); | ||
560 | if (ret_val < 0) { | ||
561 | doi_cnt--; | ||
562 | goto doi_walk_return; | ||
563 | } | ||
564 | } | ||
565 | |||
566 | doi_walk_return: | ||
567 | rcu_read_unlock(); | ||
568 | *skip_cnt = doi_cnt; | ||
569 | return ret_val; | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * calipso_validate - Validate a CALIPSO option | ||
574 | * @skb: the packet | ||
575 | * @option: the start of the option | ||
576 | * | ||
577 | * Description: | ||
578 | * This routine is called to validate a CALIPSO option. | ||
579 | * If the option is valid then %true is returned, otherwise | ||
580 | * %false is returned. | ||
581 | * | ||
582 | * The caller should have already checked that the length of the | ||
583 | * option (including the TLV header) is >= 10 and that the catmap | ||
584 | * length is consistent with the option length. | ||
585 | * | ||
586 | * We leave checks on the level and categories to the socket layer. | ||
587 | */ | ||
588 | bool calipso_validate(const struct sk_buff *skb, const unsigned char *option) | ||
589 | { | ||
590 | struct calipso_doi *doi_def; | ||
591 | bool ret_val; | ||
592 | u16 crc, len = option[1] + 2; | ||
593 | static const u8 zero[2]; | ||
594 | |||
595 | /* The original CRC runs over the option including the TLV header | ||
596 | * with the CRC-16 field (at offset 8) zeroed out. */ | ||
597 | crc = crc_ccitt(0xffff, option, 8); | ||
598 | crc = crc_ccitt(crc, zero, sizeof(zero)); | ||
599 | if (len > 10) | ||
600 | crc = crc_ccitt(crc, option + 10, len - 10); | ||
601 | crc = ~crc; | ||
602 | if (option[8] != (crc & 0xff) || option[9] != ((crc >> 8) & 0xff)) | ||
603 | return false; | ||
604 | |||
605 | rcu_read_lock(); | ||
606 | doi_def = calipso_doi_search(get_unaligned_be32(option + 2)); | ||
607 | ret_val = !!doi_def; | ||
608 | rcu_read_unlock(); | ||
609 | |||
610 | return ret_val; | ||
611 | } | ||
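Illustrative aside, not part of the patch: a user-space sketch of the checksum walk performed above. crc16_kermit() is a hypothetical bitwise stand-in for the kernel's crc_ccitt() (LSB-first, polynomial 0x8408), and the option bytes form an assumed minimal CALIPSO option (DOI 1, no compartment bitmap):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_kermit(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
}

int main(void)
{
        /* Hypothetical option: type 7, length 8, DOI 1, no categories,
         * sensitivity level 0, checksum field initially zero. */
        uint8_t opt[10] = { 7, 8, 0, 0, 0, 1, 0, 0, 0, 0 };
        uint16_t crc = ~crc16_kermit(0xffff, opt, sizeof(opt));

        opt[8] = crc & 0xff;            /* checksum is stored low byte first */
        opt[9] = (crc >> 8) & 0xff;

        /* Receiver side, mirroring calipso_validate(): recompute the CRC with
         * the checksum field zeroed and compare against the stored bytes. */
        const uint8_t zero[2] = { 0, 0 };
        uint16_t check = crc16_kermit(0xffff, opt, 8);

        check = ~crc16_kermit(check, zero, sizeof(zero));
        printf("valid: %d\n",
               opt[8] == (check & 0xff) && opt[9] == ((check >> 8) & 0xff));
        return 0;
}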
612 | |||
613 | /** | ||
614 | * calipso_map_cat_hton - Perform a category mapping from host to network | ||
615 | * @doi_def: the DOI definition | ||
616 | * @secattr: the security attributes | ||
617 | * @net_cat: the zeroed-out category bitmap in network/CALIPSO format | ||
618 | * @net_cat_len: the length of the CALIPSO bitmap in bytes | ||
619 | * | ||
620 | * Description: | ||
621 | * Perform a label mapping to translate a local MLS category bitmap to the | ||
622 | * correct CALIPSO bitmap using the given DOI definition. Returns the minimum | ||
623 | * size in bytes of the network bitmap on success, negative values otherwise. | ||
624 | * | ||
625 | */ | ||
626 | static int calipso_map_cat_hton(const struct calipso_doi *doi_def, | ||
627 | const struct netlbl_lsm_secattr *secattr, | ||
628 | unsigned char *net_cat, | ||
629 | u32 net_cat_len) | ||
630 | { | ||
631 | int spot = -1; | ||
632 | u32 net_spot_max = 0; | ||
633 | u32 net_clen_bits = net_cat_len * 8; | ||
634 | |||
635 | for (;;) { | ||
636 | spot = netlbl_catmap_walk(secattr->attr.mls.cat, | ||
637 | spot + 1); | ||
638 | if (spot < 0) | ||
639 | break; | ||
640 | if (spot >= net_clen_bits) | ||
641 | return -ENOSPC; | ||
642 | netlbl_bitmap_setbit(net_cat, spot, 1); | ||
643 | |||
644 | if (spot > net_spot_max) | ||
645 | net_spot_max = spot; | ||
646 | } | ||
647 | |||
648 | return (net_spot_max / 32 + 1) * 4; | ||
649 | } | ||
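Illustrative aside, not part of the patch: the value returned above is the bitmap length rounded up to whole 32-bit words covering the highest set category bit. A tiny sketch of that arithmetic; cat_bytes() is a hypothetical helper:

#include <assert.h>

/* Bytes needed for the network bitmap when net_spot_max is the highest bit. */
static unsigned int cat_bytes(unsigned int net_spot_max)
{
        return (net_spot_max / 32 + 1) * 4;
}

int main(void)
{
        assert(cat_bytes(0)  == 4);     /* a single category still needs a word */
        assert(cat_bytes(31) == 4);
        assert(cat_bytes(32) == 8);
        assert(cat_bytes(70) == 12);    /* bit 70 lives in the third word */
        return 0;
}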
650 | |||
651 | /** | ||
652 | * calipso_map_cat_ntoh - Perform a category mapping from network to host | ||
653 | * @doi_def: the DOI definition | ||
654 | * @net_cat: the category bitmap in network/CALIPSO format | ||
655 | * @net_cat_len: the length of the CALIPSO bitmap in bytes | ||
656 | * @secattr: the security attributes | ||
657 | * | ||
658 | * Description: | ||
659 | * Perform a label mapping to translate a CALIPSO bitmap to the correct local | ||
660 | * MLS category bitmap using the given DOI definition. Returns zero on | ||
661 | * success, negative values on failure. | ||
662 | * | ||
663 | */ | ||
664 | static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def, | ||
665 | const unsigned char *net_cat, | ||
666 | u32 net_cat_len, | ||
667 | struct netlbl_lsm_secattr *secattr) | ||
668 | { | ||
669 | int ret_val; | ||
670 | int spot = -1; | ||
671 | u32 net_clen_bits = net_cat_len * 8; | ||
672 | |||
673 | for (;;) { | ||
674 | spot = netlbl_bitmap_walk(net_cat, | ||
675 | net_clen_bits, | ||
676 | spot + 1, | ||
677 | 1); | ||
678 | if (spot < 0) { | ||
679 | if (spot == -2) | ||
680 | return -EFAULT; | ||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat, | ||
685 | spot, | ||
686 | GFP_ATOMIC); | ||
687 | if (ret_val != 0) | ||
688 | return ret_val; | ||
689 | } | ||
690 | |||
691 | return -EINVAL; | ||
692 | } | ||
693 | |||
694 | /** | ||
695 | * calipso_pad_write - Writes pad bytes in TLV format | ||
696 | * @buf: the buffer | ||
697 | * @offset: offset from start of buffer to write padding | ||
698 | * @count: number of pad bytes to write | ||
699 | * | ||
700 | * Description: | ||
701 | * Write @count bytes of TLV padding into @buf starting at offset @offset. | ||
702 | * Returns zero on success, -EINVAL if @count is 8 or more - see RFC 4942. | ||
703 | * | ||
704 | */ | ||
705 | static int calipso_pad_write(unsigned char *buf, unsigned int offset, | ||
706 | unsigned int count) | ||
707 | { | ||
708 | if (WARN_ON_ONCE(count >= 8)) | ||
709 | return -EINVAL; | ||
710 | |||
711 | switch (count) { | ||
712 | case 0: | ||
713 | break; | ||
714 | case 1: | ||
715 | buf[offset] = IPV6_TLV_PAD1; | ||
716 | break; | ||
717 | default: | ||
718 | buf[offset] = IPV6_TLV_PADN; | ||
719 | buf[offset + 1] = count - 2; | ||
720 | if (count > 2) | ||
721 | memset(buf + offset + 2, 0, count - 2); | ||
722 | break; | ||
723 | } | ||
724 | return 0; | ||
725 | } | ||
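Illustrative aside, not part of the patch: a user-space copy of the padding rules above, printing the byte patterns produced for each count (a lone Pad1 byte, or a PadN header whose length byte is count - 2 followed by zero bytes). PAD1, PADN and pad_write() are hypothetical stand-ins for IPV6_TLV_PAD1, IPV6_TLV_PADN and calipso_pad_write():

#include <stdio.h>
#include <string.h>

#define PAD1 0x00                       /* IPV6_TLV_PAD1 */
#define PADN 0x01                       /* IPV6_TLV_PADN */

static void pad_write(unsigned char *buf, unsigned int offset, unsigned int count)
{
        if (count == 0)
                return;
        if (count == 1) {
                buf[offset] = PAD1;
                return;
        }
        buf[offset] = PADN;
        buf[offset + 1] = count - 2;
        memset(buf + offset + 2, 0, count - 2);
}

int main(void)
{
        unsigned char buf[8];

        for (unsigned int count = 1; count < 8; count++) {
                pad_write(buf, 0, count);
                for (unsigned int i = 0; i < count; i++)
                        printf("%02x ", buf[i]);
                printf("\n");           /* 00 / 01 00 / 01 01 00 / ... */
        }
        return 0;
}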
726 | |||
727 | /** | ||
728 | * calipso_genopt - Generate a CALIPSO option | ||
729 | * @buf: the option buffer | ||
730 | * @start: offset from which to write | ||
731 | * @buf_len: the size of @buf | ||
732 | * @doi_def: the CALIPSO DOI to use | ||
733 | * @secattr: the security attributes | ||
734 | * | ||
735 | * Description: | ||
736 | * Generate a CALIPSO option using the DOI definition and security attributes | ||
737 | * passed to the function. This also generates up to three bytes of leading | ||
738 | * padding that ensures that the option is 4n + 2 aligned. It returns the | ||
739 | * number of bytes written (including any initial padding). | ||
740 | */ | ||
741 | static int calipso_genopt(unsigned char *buf, u32 start, u32 buf_len, | ||
742 | const struct calipso_doi *doi_def, | ||
743 | const struct netlbl_lsm_secattr *secattr) | ||
744 | { | ||
745 | int ret_val; | ||
746 | u32 len, pad; | ||
747 | u16 crc; | ||
748 | static const unsigned char padding[4] = {2, 1, 0, 3}; | ||
749 | unsigned char *calipso; | ||
750 | |||
751 | /* CALIPSO has 4n + 2 alignment */ | ||
752 | pad = padding[start & 3]; | ||
753 | if (buf_len <= start + pad + CALIPSO_HDR_LEN) | ||
754 | return -ENOSPC; | ||
755 | |||
756 | if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) | ||
757 | return -EPERM; | ||
758 | |||
759 | len = CALIPSO_HDR_LEN; | ||
760 | |||
761 | if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { | ||
762 | ret_val = calipso_map_cat_hton(doi_def, | ||
763 | secattr, | ||
764 | buf + start + pad + len, | ||
765 | buf_len - start - pad - len); | ||
766 | if (ret_val < 0) | ||
767 | return ret_val; | ||
768 | len += ret_val; | ||
769 | } | ||
770 | |||
771 | calipso_pad_write(buf, start, pad); | ||
772 | calipso = buf + start + pad; | ||
773 | |||
774 | calipso[0] = IPV6_TLV_CALIPSO; | ||
775 | calipso[1] = len - 2; | ||
776 | *(__be32 *)(calipso + 2) = htonl(doi_def->doi); | ||
777 | calipso[6] = (len - CALIPSO_HDR_LEN) / 4; | ||
778 | calipso[7] = secattr->attr.mls.lvl; | ||
779 | crc = ~crc_ccitt(0xffff, calipso, len); | ||
780 | calipso[8] = crc & 0xff; | ||
781 | calipso[9] = (crc >> 8) & 0xff; | ||
782 | return pad + len; | ||
783 | } | ||
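Illustrative aside, not part of the patch: a quick check that the {2, 1, 0, 3} leading-pad table used above always lands the option type byte on a 4n + 2 boundary, which is the alignment RFC 5570 specifies for the CALIPSO option:

#include <assert.h>

static const unsigned char padding[4] = { 2, 1, 0, 3 };

int main(void)
{
        /* Whatever the starting offset, start + pad is congruent to 2 mod 4. */
        for (unsigned int start = 0; start < 64; start++)
                assert((start + padding[start & 3]) % 4 == 2);
        return 0;
}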
784 | |||
785 | /* Hop-by-hop hdr helper functions | ||
786 | */ | ||
787 | |||
788 | /** | ||
789 | * calipso_opt_update - Replaces socket's hop options with a new set | ||
790 | * @sk: the socket | ||
791 | * @hop: new hop options | ||
792 | * | ||
793 | * Description: | ||
794 | * Replaces @sk's hop options with @hop. @hop may be NULL to leave | ||
795 | * the socket with no hop options. | ||
796 | * | ||
797 | */ | ||
798 | static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop) | ||
799 | { | ||
800 | struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; | ||
801 | |||
802 | txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS, | ||
803 | hop, hop ? ipv6_optlen(hop) : 0); | ||
804 | txopt_put(old); | ||
805 | if (IS_ERR(txopts)) | ||
806 | return PTR_ERR(txopts); | ||
807 | |||
808 | txopts = ipv6_update_options(sk, txopts); | ||
809 | if (txopts) { | ||
810 | atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); | ||
811 | txopt_put(txopts); | ||
812 | } | ||
813 | |||
814 | return 0; | ||
815 | } | ||
816 | |||
817 | /** | ||
818 | * calipso_tlv_len - Returns the length of the TLV | ||
819 | * @opt: the option header | ||
820 | * @offset: offset of the TLV within the header | ||
821 | * | ||
822 | * Description: | ||
823 | * Returns the length of the TLV option at offset @offset within | ||
824 | * the option header @opt. Checks that the entire TLV fits inside | ||
825 | * the option header, returns a negative value if this is not the case. | ||
826 | */ | ||
827 | static int calipso_tlv_len(struct ipv6_opt_hdr *opt, unsigned int offset) | ||
828 | { | ||
829 | unsigned char *tlv = (unsigned char *)opt; | ||
830 | unsigned int opt_len = ipv6_optlen(opt), tlv_len; | ||
831 | |||
832 | if (offset < sizeof(*opt) || offset >= opt_len) | ||
833 | return -EINVAL; | ||
834 | if (tlv[offset] == IPV6_TLV_PAD1) | ||
835 | return 1; | ||
836 | if (offset + 1 >= opt_len) | ||
837 | return -EINVAL; | ||
838 | tlv_len = tlv[offset + 1] + 2; | ||
839 | if (offset + tlv_len > opt_len) | ||
840 | return -EINVAL; | ||
841 | return tlv_len; | ||
842 | } | ||
843 | |||
844 | /** | ||
845 | * calipso_opt_find - Finds the CALIPSO option in an IPv6 hop options header | ||
846 | * @hop: the hop options header | ||
847 | * @start: on return holds the offset of any leading padding | ||
848 | * @end: on return holds the offset of the first non-pad TLV after CALIPSO | ||
849 | * | ||
850 | * Description: | ||
851 | * Finds the space occupied by a CALIPSO option (including any leading and | ||
852 | * trailing padding). | ||
853 | * | ||
854 | * If a CALIPSO option exists set @start and @end to the | ||
855 | * offsets within @hop of the start of padding before the first | ||
856 | * CALIPSO option and the end of padding after the first CALIPSO | ||
857 | * option. In this case the function returns 0. | ||
858 | * | ||
859 | * In the absence of a CALIPSO option, @start and @end will be | ||
860 | * set to the start and end of any trailing padding in the header. | ||
861 | * This is useful when appending a new option, as the caller may want | ||
862 | * to overwrite some of this padding. In this case the function will | ||
863 | * return -ENOENT. | ||
864 | */ | ||
865 | static int calipso_opt_find(struct ipv6_opt_hdr *hop, unsigned int *start, | ||
866 | unsigned int *end) | ||
867 | { | ||
868 | int ret_val = -ENOENT, tlv_len; | ||
869 | unsigned int opt_len, offset, offset_s = 0, offset_e = 0; | ||
870 | unsigned char *opt = (unsigned char *)hop; | ||
871 | |||
872 | opt_len = ipv6_optlen(hop); | ||
873 | offset = sizeof(*hop); | ||
874 | |||
875 | while (offset < opt_len) { | ||
876 | tlv_len = calipso_tlv_len(hop, offset); | ||
877 | if (tlv_len < 0) | ||
878 | return tlv_len; | ||
879 | |||
880 | switch (opt[offset]) { | ||
881 | case IPV6_TLV_PAD1: | ||
882 | case IPV6_TLV_PADN: | ||
883 | if (offset_e) | ||
884 | offset_e = offset; | ||
885 | break; | ||
886 | case IPV6_TLV_CALIPSO: | ||
887 | ret_val = 0; | ||
888 | offset_e = offset; | ||
889 | break; | ||
890 | default: | ||
891 | if (offset_e == 0) | ||
892 | offset_s = offset; | ||
893 | else | ||
894 | goto out; | ||
895 | } | ||
896 | offset += tlv_len; | ||
897 | } | ||
898 | |||
899 | out: | ||
900 | if (offset_s) | ||
901 | *start = offset_s + calipso_tlv_len(hop, offset_s); | ||
902 | else | ||
903 | *start = sizeof(*hop); | ||
904 | if (offset_e) | ||
905 | *end = offset_e + calipso_tlv_len(hop, offset_e); | ||
906 | else | ||
907 | *end = opt_len; | ||
908 | |||
909 | return ret_val; | ||
910 | } | ||
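As an illustration (not part of the patch): for a 16-byte hop-by-hop header whose only option is a 14-byte CALIPSO option (one compartment word) starting at offset 2, the walk above returns 0 with *start == 2 and *end == 16, i.e. the entire region after the two-byte header that a caller may overwrite or strip, padding included.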
911 | |||
912 | /** | ||
913 | * calipso_opt_insert - Inserts a CALIPSO option into an IPv6 hop opt hdr | ||
914 | * @hop: the original hop options header | ||
915 | * @doi_def: the CALIPSO DOI to use | ||
916 | * @secattr: the specific security attributes of the socket | ||
917 | * | ||
918 | * Description: | ||
919 | * Creates a new hop options header based on @hop with a | ||
920 | * CALIPSO option added to it. If @hop already contains a CALIPSO | ||
921 | * option this is overwritten, otherwise the new option is appended | ||
922 | * after any existing options. If @hop is NULL then the new header | ||
923 | * will contain just the CALIPSO option and any needed padding. | ||
924 | * | ||
925 | */ | ||
926 | static struct ipv6_opt_hdr * | ||
927 | calipso_opt_insert(struct ipv6_opt_hdr *hop, | ||
928 | const struct calipso_doi *doi_def, | ||
929 | const struct netlbl_lsm_secattr *secattr) | ||
930 | { | ||
931 | unsigned int start, end, buf_len, pad, hop_len; | ||
932 | struct ipv6_opt_hdr *new; | ||
933 | int ret_val; | ||
934 | |||
935 | if (hop) { | ||
936 | hop_len = ipv6_optlen(hop); | ||
937 | ret_val = calipso_opt_find(hop, &start, &end); | ||
938 | if (ret_val && ret_val != -ENOENT) | ||
939 | return ERR_PTR(ret_val); | ||
940 | } else { | ||
941 | hop_len = 0; | ||
942 | start = sizeof(*hop); | ||
943 | end = 0; | ||
944 | } | ||
945 | |||
946 | buf_len = hop_len + start - end + CALIPSO_OPT_LEN_MAX_WITH_PAD; | ||
947 | new = kzalloc(buf_len, GFP_ATOMIC); | ||
948 | if (!new) | ||
949 | return ERR_PTR(-ENOMEM); | ||
950 | |||
951 | if (start > sizeof(*hop)) | ||
952 | memcpy(new, hop, start); | ||
953 | ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, | ||
954 | secattr); | ||
955 | if (ret_val < 0) | ||
956 | return ERR_PTR(ret_val); | ||
957 | |||
958 | buf_len = start + ret_val; | ||
959 | /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ | ||
960 | pad = ((buf_len & 4) + (end & 7)) & 7; | ||
961 | calipso_pad_write((unsigned char *)new, buf_len, pad); | ||
962 | buf_len += pad; | ||
963 | |||
964 | if (end != hop_len) { | ||
965 | memcpy((char *)new + buf_len, (char *)hop + end, hop_len - end); | ||
966 | buf_len += hop_len - end; | ||
967 | } | ||
968 | new->nexthdr = 0; | ||
969 | new->hdrlen = buf_len / 8 - 1; | ||
970 | |||
971 | return new; | ||
972 | } | ||
973 | |||
974 | /** | ||
975 | * calipso_opt_del - Removes the CALIPSO option from an option header | ||
976 | * @hop: the original header | ||
977 | * @new: the new header | ||
978 | * | ||
979 | * Description: | ||
980 | * Creates a new header based on @hop without any CALIPSO option. If @hop | ||
981 | * doesn't contain a CALIPSO option it returns -ENOENT. If @hop contains | ||
982 | * no other non-padding options, it returns zero with @new set to NULL. | ||
983 | * Otherwise it returns zero, creates a new header without the CALIPSO | ||
984 | * option (and removing as much padding as possible) and returns with | ||
985 | * @new set to that header. | ||
986 | * | ||
987 | */ | ||
988 | static int calipso_opt_del(struct ipv6_opt_hdr *hop, | ||
989 | struct ipv6_opt_hdr **new) | ||
990 | { | ||
991 | int ret_val; | ||
992 | unsigned int start, end, delta, pad, hop_len; | ||
993 | |||
994 | ret_val = calipso_opt_find(hop, &start, &end); | ||
995 | if (ret_val) | ||
996 | return ret_val; | ||
997 | |||
998 | hop_len = ipv6_optlen(hop); | ||
999 | if (start == sizeof(*hop) && end == hop_len) { | ||
1000 | /* There's no other option in the header so return NULL */ | ||
1001 | *new = NULL; | ||
1002 | return 0; | ||
1003 | } | ||
1004 | |||
1005 | delta = (end - start) & ~7; | ||
1006 | *new = kzalloc(hop_len - delta, GFP_ATOMIC); | ||
1007 | if (!*new) | ||
1008 | return -ENOMEM; | ||
1009 | |||
1010 | memcpy(*new, hop, start); | ||
1011 | (*new)->hdrlen -= delta / 8; | ||
1012 | pad = (end - start) & 7; | ||
1013 | calipso_pad_write((unsigned char *)*new, start, pad); | ||
1014 | if (end != hop_len) | ||
1015 | memcpy((char *)*new + start + pad, (char *)hop + end, | ||
1016 | hop_len - end); | ||
1017 | |||
1018 | return 0; | ||
1019 | } | ||
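Illustrative aside, not part of the patch: the shrink/re-pad split above removes whole 8-octet units from the header and rewrites the remainder as padding. A tiny sketch with an assumed 14-byte CALIPSO region:

#include <assert.h>

int main(void)
{
        unsigned int span = 14;                 /* bytes between *start and *end */
        unsigned int delta = span & ~7u;        /* header shrinks in 8-byte units */
        unsigned int pad = span & 7u;           /* remainder becomes new padding */

        assert(delta == 8 && pad == 6);
        assert(delta + pad == span);
        return 0;
}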
1020 | |||
1021 | /** | ||
1022 | * calipso_opt_getattr - Get the security attributes from a memory block | ||
1023 | * @calipso: the CALIPSO option | ||
1024 | * @secattr: the security attributes | ||
1025 | * | ||
1026 | * Description: | ||
1027 | * Inspect @calipso and return the security attributes in @secattr. | ||
1028 | * Returns zero on success and negative values on failure. | ||
1029 | * | ||
1030 | */ | ||
1031 | static int calipso_opt_getattr(const unsigned char *calipso, | ||
1032 | struct netlbl_lsm_secattr *secattr) | ||
1033 | { | ||
1034 | int ret_val = -ENOMSG; | ||
1035 | u32 doi, len = calipso[1], cat_len = calipso[6] * 4; | ||
1036 | struct calipso_doi *doi_def; | ||
1037 | |||
1038 | if (cat_len + 8 > len) | ||
1039 | return -EINVAL; | ||
1040 | |||
1041 | if (calipso_cache_check(calipso + 2, calipso[1], secattr) == 0) | ||
1042 | return 0; | ||
1043 | |||
1044 | doi = get_unaligned_be32(calipso + 2); | ||
1045 | rcu_read_lock(); | ||
1046 | doi_def = calipso_doi_search(doi); | ||
1047 | if (!doi_def) | ||
1048 | goto getattr_return; | ||
1049 | |||
1050 | secattr->attr.mls.lvl = calipso[7]; | ||
1051 | secattr->flags |= NETLBL_SECATTR_MLS_LVL; | ||
1052 | |||
1053 | if (cat_len) { | ||
1054 | ret_val = calipso_map_cat_ntoh(doi_def, | ||
1055 | calipso + 10, | ||
1056 | cat_len, | ||
1057 | secattr); | ||
1058 | if (ret_val != 0) { | ||
1059 | netlbl_catmap_free(secattr->attr.mls.cat); | ||
1060 | goto getattr_return; | ||
1061 | } | ||
1062 | |||
1063 | secattr->flags |= NETLBL_SECATTR_MLS_CAT; | ||
1064 | } | ||
1065 | |||
1066 | secattr->type = NETLBL_NLTYPE_CALIPSO; | ||
1067 | |||
1068 | getattr_return: | ||
1069 | rcu_read_unlock(); | ||
1070 | return ret_val; | ||
1071 | } | ||
1072 | |||
1073 | /* sock functions. | ||
1074 | */ | ||
1075 | |||
1076 | /** | ||
1077 | * calipso_sock_getattr - Get the security attributes from a sock | ||
1078 | * @sk: the sock | ||
1079 | * @secattr: the security attributes | ||
1080 | * | ||
1081 | * Description: | ||
1082 | * Query @sk to see if there is a CALIPSO option attached to the sock and if | ||
1083 | * there is return the CALIPSO security attributes in @secattr. This function | ||
1084 | * requires that @sk be locked, or privately held, but it does not do any | ||
1085 | * locking itself. Returns zero on success and negative values on failure. | ||
1086 | * | ||
1087 | */ | ||
1088 | static int calipso_sock_getattr(struct sock *sk, | ||
1089 | struct netlbl_lsm_secattr *secattr) | ||
1090 | { | ||
1091 | struct ipv6_opt_hdr *hop; | ||
1092 | int opt_len, len, ret_val = -ENOMSG, offset; | ||
1093 | unsigned char *opt; | ||
1094 | struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); | ||
1095 | |||
1096 | if (!txopts || !txopts->hopopt) | ||
1097 | goto done; | ||
1098 | |||
1099 | hop = txopts->hopopt; | ||
1100 | opt = (unsigned char *)hop; | ||
1101 | opt_len = ipv6_optlen(hop); | ||
1102 | offset = sizeof(*hop); | ||
1103 | while (offset < opt_len) { | ||
1104 | len = calipso_tlv_len(hop, offset); | ||
1105 | if (len < 0) { | ||
1106 | ret_val = len; | ||
1107 | goto done; | ||
1108 | } | ||
1109 | switch (opt[offset]) { | ||
1110 | case IPV6_TLV_CALIPSO: | ||
1111 | if (len < CALIPSO_HDR_LEN) | ||
1112 | ret_val = -EINVAL; | ||
1113 | else | ||
1114 | ret_val = calipso_opt_getattr(&opt[offset], | ||
1115 | secattr); | ||
1116 | goto done; | ||
1117 | default: | ||
1118 | offset += len; | ||
1119 | break; | ||
1120 | } | ||
1121 | } | ||
1122 | done: | ||
1123 | txopt_put(txopts); | ||
1124 | return ret_val; | ||
1125 | } | ||
1126 | |||
1127 | /** | ||
1128 | * calipso_sock_setattr - Add a CALIPSO option to a socket | ||
1129 | * @sk: the socket | ||
1130 | * @doi_def: the CALIPSO DOI to use | ||
1131 | * @secattr: the specific security attributes of the socket | ||
1132 | * | ||
1133 | * Description: | ||
1134 | * Set the CALIPSO option on the given socket using the DOI definition and | ||
1135 | * security attributes passed to the function. This function requires | ||
1136 | * exclusive access to @sk, which means it either needs to be in the | ||
1137 | * process of being created or locked. Returns zero on success and negative | ||
1138 | * values on failure. | ||
1139 | * | ||
1140 | */ | ||
1141 | static int calipso_sock_setattr(struct sock *sk, | ||
1142 | const struct calipso_doi *doi_def, | ||
1143 | const struct netlbl_lsm_secattr *secattr) | ||
1144 | { | ||
1145 | int ret_val; | ||
1146 | struct ipv6_opt_hdr *old, *new; | ||
1147 | struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); | ||
1148 | |||
1149 | old = NULL; | ||
1150 | if (txopts) | ||
1151 | old = txopts->hopopt; | ||
1152 | |||
1153 | new = calipso_opt_insert(old, doi_def, secattr); | ||
1154 | txopt_put(txopts); | ||
1155 | if (IS_ERR(new)) | ||
1156 | return PTR_ERR(new); | ||
1157 | |||
1158 | ret_val = calipso_opt_update(sk, new); | ||
1159 | |||
1160 | kfree(new); | ||
1161 | return ret_val; | ||
1162 | } | ||
1163 | |||
1164 | /** | ||
1165 | * calipso_sock_delattr - Delete the CALIPSO option from a socket | ||
1166 | * @sk: the socket | ||
1167 | * | ||
1168 | * Description: | ||
1169 | * Removes the CALIPSO option from a socket, if present. | ||
1170 | * | ||
1171 | */ | ||
1172 | static void calipso_sock_delattr(struct sock *sk) | ||
1173 | { | ||
1174 | struct ipv6_opt_hdr *new_hop; | ||
1175 | struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); | ||
1176 | |||
1177 | if (!txopts || !txopts->hopopt) | ||
1178 | goto done; | ||
1179 | |||
1180 | if (calipso_opt_del(txopts->hopopt, &new_hop)) | ||
1181 | goto done; | ||
1182 | |||
1183 | calipso_opt_update(sk, new_hop); | ||
1184 | kfree(new_hop); | ||
1185 | |||
1186 | done: | ||
1187 | txopt_put(txopts); | ||
1188 | } | ||
1189 | |||
1190 | /* request sock functions. | ||
1191 | */ | ||
1192 | |||
1193 | /** | ||
1194 | * calipso_req_setattr - Add a CALIPSO option to a connection request socket | ||
1195 | * @req: the connection request socket | ||
1196 | * @doi_def: the CALIPSO DOI to use | ||
1197 | * @secattr: the specific security attributes of the socket | ||
1198 | * | ||
1199 | * Description: | ||
1200 | * Set the CALIPSO option on the given socket using the DOI definition and | ||
1201 | * security attributes passed to the function. Returns zero on success and | ||
1202 | * negative values on failure. | ||
1203 | * | ||
1204 | */ | ||
1205 | static int calipso_req_setattr(struct request_sock *req, | ||
1206 | const struct calipso_doi *doi_def, | ||
1207 | const struct netlbl_lsm_secattr *secattr) | ||
1208 | { | ||
1209 | struct ipv6_txoptions *txopts; | ||
1210 | struct inet_request_sock *req_inet = inet_rsk(req); | ||
1211 | struct ipv6_opt_hdr *old, *new; | ||
1212 | struct sock *sk = sk_to_full_sk(req_to_sk(req)); | ||
1213 | |||
1214 | if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt) | ||
1215 | old = req_inet->ipv6_opt->hopopt; | ||
1216 | else | ||
1217 | old = NULL; | ||
1218 | |||
1219 | new = calipso_opt_insert(old, doi_def, secattr); | ||
1220 | if (IS_ERR(new)) | ||
1221 | return PTR_ERR(new); | ||
1222 | |||
1223 | txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, | ||
1224 | new, new ? ipv6_optlen(new) : 0); | ||
1225 | |||
1226 | kfree(new); | ||
1227 | |||
1228 | if (IS_ERR(txopts)) | ||
1229 | return PTR_ERR(txopts); | ||
1230 | |||
1231 | txopts = xchg(&req_inet->ipv6_opt, txopts); | ||
1232 | if (txopts) { | ||
1233 | atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); | ||
1234 | txopt_put(txopts); | ||
1235 | } | ||
1236 | |||
1237 | return 0; | ||
1238 | } | ||
1239 | |||
1240 | /** | ||
1241 | * calipso_req_delattr - Delete the CALIPSO option from a request socket | ||
1242 | * @req: the request socket | ||
1243 | * | ||
1244 | * Description: | ||
1245 | * Removes the CALIPSO option from a request socket, if present. | ||
1246 | * | ||
1247 | */ | ||
1248 | static void calipso_req_delattr(struct request_sock *req) | ||
1249 | { | ||
1250 | struct inet_request_sock *req_inet = inet_rsk(req); | ||
1251 | struct ipv6_opt_hdr *new; | ||
1252 | struct ipv6_txoptions *txopts; | ||
1253 | struct sock *sk = sk_to_full_sk(req_to_sk(req)); | ||
1254 | |||
1255 | if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt) | ||
1256 | return; | ||
1257 | |||
1258 | if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) | ||
1259 | return; /* Nothing to do */ | ||
1260 | |||
1261 | txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, | ||
1262 | new, new ? ipv6_optlen(new) : 0); | ||
1263 | |||
1264 | if (!IS_ERR(txopts)) { | ||
1265 | txopts = xchg(&req_inet->ipv6_opt, txopts); | ||
1266 | if (txopts) { | ||
1267 | atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); | ||
1268 | txopt_put(txopts); | ||
1269 | } | ||
1270 | } | ||
1271 | kfree(new); | ||
1272 | } | ||
1273 | |||
1274 | /* skbuff functions. | ||
1275 | */ | ||
1276 | |||
1277 | /** | ||
1278 | * calipso_skbuff_optptr - Find the CALIPSO option in the packet | ||
1279 | * @skb: the packet | ||
1280 | * | ||
1281 | * Description: | ||
1282 | * Parse the packet's IP header looking for a CALIPSO option. Returns a pointer | ||
1283 | * to the start of the CALIPSO option on success, NULL if one is not found. | ||
1284 | * | ||
1285 | */ | ||
1286 | static unsigned char *calipso_skbuff_optptr(const struct sk_buff *skb) | ||
1287 | { | ||
1288 | const struct ipv6hdr *ip6_hdr = ipv6_hdr(skb); | ||
1289 | int offset; | ||
1290 | |||
1291 | if (ip6_hdr->nexthdr != NEXTHDR_HOP) | ||
1292 | return NULL; | ||
1293 | |||
1294 | offset = ipv6_find_tlv(skb, sizeof(*ip6_hdr), IPV6_TLV_CALIPSO); | ||
1295 | if (offset >= 0) | ||
1296 | return (unsigned char *)ip6_hdr + offset; | ||
1297 | |||
1298 | return NULL; | ||
1299 | } | ||
1300 | |||
1301 | /** | ||
1302 | * calipso_skbuff_setattr - Set the CALIPSO option on a packet | ||
1303 | * @skb: the packet | ||
1304 | * @doi_def: the CALIPSO DOI to use | ||
1305 | * @secattr: the security attributes | ||
1306 | * | ||
1307 | * Description: | ||
1308 | * Set the CALIPSO option on the given packet based on the security attributes. | ||
1309 | * Returns zero on success and negative values on failure. | ||
1310 | * | ||
1311 | */ | ||
1312 | static int calipso_skbuff_setattr(struct sk_buff *skb, | ||
1313 | const struct calipso_doi *doi_def, | ||
1314 | const struct netlbl_lsm_secattr *secattr) | ||
1315 | { | ||
1316 | int ret_val; | ||
1317 | struct ipv6hdr *ip6_hdr; | ||
1318 | struct ipv6_opt_hdr *hop; | ||
1319 | unsigned char buf[CALIPSO_MAX_BUFFER]; | ||
1320 | int len_delta, new_end, pad; | ||
1321 | unsigned int start, end; | ||
1322 | |||
1323 | ip6_hdr = ipv6_hdr(skb); | ||
1324 | if (ip6_hdr->nexthdr == NEXTHDR_HOP) { | ||
1325 | hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); | ||
1326 | ret_val = calipso_opt_find(hop, &start, &end); | ||
1327 | if (ret_val && ret_val != -ENOENT) | ||
1328 | return ret_val; | ||
1329 | } else { | ||
1330 | start = 0; | ||
1331 | end = 0; | ||
1332 | } | ||
1333 | |||
1334 | memset(buf, 0, sizeof(buf)); | ||
1335 | ret_val = calipso_genopt(buf, start & 3, sizeof(buf), doi_def, secattr); | ||
1336 | if (ret_val < 0) | ||
1337 | return ret_val; | ||
1338 | |||
1339 | new_end = start + ret_val; | ||
1340 | /* At this point new_end aligns to 4n, so (new_end & 4) pads to 8n */ | ||
1341 | pad = ((new_end & 4) + (end & 7)) & 7; | ||
1342 | len_delta = new_end - (int)end + pad; | ||
1343 | ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); | ||
1344 | if (ret_val < 0) | ||
1345 | return ret_val; | ||
1346 | |||
1347 | if (len_delta) { | ||
1348 | if (len_delta > 0) | ||
1349 | skb_push(skb, len_delta); | ||
1350 | else | ||
1351 | skb_pull(skb, -len_delta); | ||
1352 | memmove((char *)ip6_hdr - len_delta, ip6_hdr, | ||
1353 | sizeof(*ip6_hdr) + start); | ||
1354 | skb_reset_network_header(skb); | ||
1355 | ip6_hdr = ipv6_hdr(skb); | ||
1356 | } | ||
1357 | |||
1358 | hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); | ||
1359 | if (start == 0) { | ||
1360 | struct ipv6_opt_hdr *new_hop = (struct ipv6_opt_hdr *)buf; | ||
1361 | |||
1362 | new_hop->nexthdr = ip6_hdr->nexthdr; | ||
1363 | new_hop->hdrlen = len_delta / 8 - 1; | ||
1364 | ip6_hdr->nexthdr = NEXTHDR_HOP; | ||
1365 | } else { | ||
1366 | hop->hdrlen += len_delta / 8; | ||
1367 | } | ||
1368 | memcpy((char *)hop + start, buf + (start & 3), new_end - start); | ||
1369 | calipso_pad_write((unsigned char *)hop, new_end, pad); | ||
1370 | |||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | /** | ||
1375 | * calipso_skbuff_delattr - Delete any CALIPSO options from a packet | ||
1376 | * @skb: the packet | ||
1377 | * | ||
1378 | * Description: | ||
1379 | * Removes any and all CALIPSO options from the given packet. Returns zero on | ||
1380 | * success, negative values on failure. | ||
1381 | * | ||
1382 | */ | ||
1383 | static int calipso_skbuff_delattr(struct sk_buff *skb) | ||
1384 | { | ||
1385 | int ret_val; | ||
1386 | struct ipv6hdr *ip6_hdr; | ||
1387 | struct ipv6_opt_hdr *old_hop; | ||
1388 | u32 old_hop_len, start = 0, end = 0, delta, size, pad; | ||
1389 | |||
1390 | if (!calipso_skbuff_optptr(skb)) | ||
1391 | return 0; | ||
1392 | |||
1393 | /* since we are changing the packet we should make a copy */ | ||
1394 | ret_val = skb_cow(skb, skb_headroom(skb)); | ||
1395 | if (ret_val < 0) | ||
1396 | return ret_val; | ||
1397 | |||
1398 | ip6_hdr = ipv6_hdr(skb); | ||
1399 | old_hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); | ||
1400 | old_hop_len = ipv6_optlen(old_hop); | ||
1401 | |||
1402 | ret_val = calipso_opt_find(old_hop, &start, &end); | ||
1403 | if (ret_val) | ||
1404 | return ret_val; | ||
1405 | |||
1406 | if (start == sizeof(*old_hop) && end == old_hop_len) { | ||
1407 | /* There's no other option in the header so we delete | ||
1408 | * the whole thing. */ | ||
1409 | delta = old_hop_len; | ||
1410 | size = sizeof(*ip6_hdr); | ||
1411 | ip6_hdr->nexthdr = old_hop->nexthdr; | ||
1412 | } else { | ||
1413 | delta = (end - start) & ~7; | ||
1414 | if (delta) | ||
1415 | old_hop->hdrlen -= delta / 8; | ||
1416 | pad = (end - start) & 7; | ||
1417 | size = sizeof(*ip6_hdr) + start + pad; | ||
1418 | calipso_pad_write((unsigned char *)old_hop, start, pad); | ||
1419 | } | ||
1420 | |||
1421 | if (delta) { | ||
1422 | skb_pull(skb, delta); | ||
1423 | memmove((char *)ip6_hdr + delta, ip6_hdr, size); | ||
1424 | skb_reset_network_header(skb); | ||
1425 | } | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
1429 | |||
1430 | static const struct netlbl_calipso_ops ops = { | ||
1431 | .doi_add = calipso_doi_add, | ||
1432 | .doi_free = calipso_doi_free, | ||
1433 | .doi_remove = calipso_doi_remove, | ||
1434 | .doi_getdef = calipso_doi_getdef, | ||
1435 | .doi_putdef = calipso_doi_putdef, | ||
1436 | .doi_walk = calipso_doi_walk, | ||
1437 | .sock_getattr = calipso_sock_getattr, | ||
1438 | .sock_setattr = calipso_sock_setattr, | ||
1439 | .sock_delattr = calipso_sock_delattr, | ||
1440 | .req_setattr = calipso_req_setattr, | ||
1441 | .req_delattr = calipso_req_delattr, | ||
1442 | .opt_getattr = calipso_opt_getattr, | ||
1443 | .skbuff_optptr = calipso_skbuff_optptr, | ||
1444 | .skbuff_setattr = calipso_skbuff_setattr, | ||
1445 | .skbuff_delattr = calipso_skbuff_delattr, | ||
1446 | .cache_invalidate = calipso_cache_invalidate, | ||
1447 | .cache_add = calipso_cache_add | ||
1448 | }; | ||
1449 | |||
1450 | /** | ||
1451 | * calipso_init - Initialize the CALIPSO module | ||
1452 | * | ||
1453 | * Description: | ||
1454 | * Initialize the CALIPSO module and prepare it for use. Returns zero on | ||
1455 | * success and negative values on failure. | ||
1456 | * | ||
1457 | */ | ||
1458 | int __init calipso_init(void) | ||
1459 | { | ||
1460 | int ret_val; | ||
1461 | |||
1462 | ret_val = calipso_cache_init(); | ||
1463 | if (!ret_val) | ||
1464 | netlbl_calipso_ops_register(&ops); | ||
1465 | return ret_val; | ||
1466 | } | ||
1467 | |||
1468 | void calipso_exit(void) | ||
1469 | { | ||
1470 | netlbl_calipso_ops_register(NULL); | ||
1471 | calipso_cache_invalidate(); | ||
1472 | kfree(calipso_cache); | ||
1473 | } | ||
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 8de5dd7aaa05..139ceb68bd37 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <net/ndisc.h> | 43 | #include <net/ndisc.h> |
44 | #include <net/ip6_route.h> | 44 | #include <net/ip6_route.h> |
45 | #include <net/addrconf.h> | 45 | #include <net/addrconf.h> |
46 | #include <net/calipso.h> | ||
46 | #if IS_ENABLED(CONFIG_IPV6_MIP6) | 47 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
47 | #include <net/xfrm.h> | 48 | #include <net/xfrm.h> |
48 | #endif | 49 | #endif |
@@ -603,6 +604,28 @@ drop: | |||
603 | return false; | 604 | return false; |
604 | } | 605 | } |
605 | 606 | ||
607 | /* CALIPSO RFC 5570 */ | ||
608 | |||
609 | static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff) | ||
610 | { | ||
611 | const unsigned char *nh = skb_network_header(skb); | ||
612 | |||
613 | if (nh[optoff + 1] < 8) | ||
614 | goto drop; | ||
615 | |||
616 | if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1]) | ||
617 | goto drop; | ||
618 | |||
619 | if (!calipso_validate(skb, nh + optoff)) | ||
620 | goto drop; | ||
621 | |||
622 | return true; | ||
623 | |||
624 | drop: | ||
625 | kfree_skb(skb); | ||
626 | return false; | ||
627 | } | ||
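As an illustration (not part of the patch): the smallest well-formed option has a length byte of 8 (4-byte DOI, compartment length, sensitivity level, 2-byte checksum) and a compartment length field of 0, so both checks above pass; an option advertising two 32-bit compartment words must carry a length byte of at least 2 * 4 + 8 = 16, otherwise it is dropped before calipso_validate() is ever consulted.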
628 | |||
606 | static const struct tlvtype_proc tlvprochopopt_lst[] = { | 629 | static const struct tlvtype_proc tlvprochopopt_lst[] = { |
607 | { | 630 | { |
608 | .type = IPV6_TLV_ROUTERALERT, | 631 | .type = IPV6_TLV_ROUTERALERT, |
@@ -612,6 +635,10 @@ static const struct tlvtype_proc tlvprochopopt_lst[] = { | |||
612 | .type = IPV6_TLV_JUMBO, | 635 | .type = IPV6_TLV_JUMBO, |
613 | .func = ipv6_hop_jumbo, | 636 | .func = ipv6_hop_jumbo, |
614 | }, | 637 | }, |
638 | { | ||
639 | .type = IPV6_TLV_CALIPSO, | ||
640 | .func = ipv6_hop_calipso, | ||
641 | }, | ||
615 | { -1, } | 642 | { -1, } |
616 | }; | 643 | }; |
617 | 644 | ||
@@ -758,6 +785,27 @@ static int ipv6_renew_option(void *ohdr, | |||
758 | return 0; | 785 | return 0; |
759 | } | 786 | } |
760 | 787 | ||
788 | /** | ||
789 | * ipv6_renew_options - replace a specific ext hdr with a new one. | ||
790 | * | ||
791 | * @sk: sock from which to allocate memory | ||
792 | * @opt: original options | ||
793 | * @newtype: option type to replace in @opt | ||
794 | * @newopt: new option of type @newtype to replace (user-mem) | ||
795 | * @newoptlen: length of @newopt | ||
796 | * | ||
797 | * Returns a new set of options which is a copy of @opt with the | ||
798 | * option type @newtype replaced with @newopt. | ||
799 | * | ||
800 | * @opt may be NULL, in which case a new set of options is returned | ||
801 | * containing just @newopt. | ||
802 | * | ||
803 | * @newopt may be NULL, in which case the specified option type is | ||
804 | * not copied into the new set of options. | ||
805 | * | ||
806 | * The new set of options is allocated from the socket option memory | ||
807 | * buffer of @sk. | ||
808 | */ | ||
761 | struct ipv6_txoptions * | 809 | struct ipv6_txoptions * |
762 | ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | 810 | ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, |
763 | int newtype, | 811 | int newtype, |
@@ -830,6 +878,34 @@ out: | |||
830 | return ERR_PTR(err); | 878 | return ERR_PTR(err); |
831 | } | 879 | } |
832 | 880 | ||
881 | /** | ||
882 | * ipv6_renew_options_kern - replace a specific ext hdr with a new one. | ||
883 | * | ||
884 | * @sk: sock from which to allocate memory | ||
885 | * @opt: original options | ||
886 | * @newtype: option type to replace in @opt | ||
887 | * @newopt: new option of type @newtype to replace (kernel-mem) | ||
888 | * @newoptlen: length of @newopt | ||
889 | * | ||
890 | * See ipv6_renew_options(). The difference is that @newopt is | ||
891 | * kernel memory, rather than user memory. | ||
892 | */ | ||
893 | struct ipv6_txoptions * | ||
894 | ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt, | ||
895 | int newtype, struct ipv6_opt_hdr *newopt, | ||
896 | int newoptlen) | ||
897 | { | ||
898 | struct ipv6_txoptions *ret_val; | ||
899 | const mm_segment_t old_fs = get_fs(); | ||
900 | |||
901 | set_fs(KERNEL_DS); | ||
902 | ret_val = ipv6_renew_options(sk, opt, newtype, | ||
903 | (struct ipv6_opt_hdr __user *)newopt, | ||
904 | newoptlen); | ||
905 | set_fs(old_fs); | ||
906 | return ret_val; | ||
907 | } | ||
908 | |||
833 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, | 909 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, |
834 | struct ipv6_txoptions *opt) | 910 | struct ipv6_txoptions *opt) |
835 | { | 911 | { |
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 9508a20fbf61..305e2ed730bf 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c | |||
@@ -112,7 +112,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, | |||
112 | } | 112 | } |
113 | EXPORT_SYMBOL(ipv6_skip_exthdr); | 113 | EXPORT_SYMBOL(ipv6_skip_exthdr); |
114 | 114 | ||
115 | int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) | 115 | int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type) |
116 | { | 116 | { |
117 | const unsigned char *nh = skb_network_header(skb); | 117 | const unsigned char *nh = skb_network_header(skb); |
118 | int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); | 118 | int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index a9895e15ee9c..5330262ab673 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -98,7 +98,6 @@ int ip6_ra_control(struct sock *sk, int sel) | |||
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | static | ||
102 | struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | 101 | struct ipv6_txoptions *ipv6_update_options(struct sock *sk, |
103 | struct ipv6_txoptions *opt) | 102 | struct ipv6_txoptions *opt) |
104 | { | 103 | { |
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 45243bbe5253..69c50e737c54 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -15,6 +15,9 @@ | |||
15 | #include <net/ipv6.h> | 15 | #include <net/ipv6.h> |
16 | #include <net/addrconf.h> | 16 | #include <net/addrconf.h> |
17 | #include <net/inet_frag.h> | 17 | #include <net/inet_frag.h> |
18 | #ifdef CONFIG_NETLABEL | ||
19 | #include <net/calipso.h> | ||
20 | #endif | ||
18 | 21 | ||
19 | static int one = 1; | 22 | static int one = 1; |
20 | static int auto_flowlabels_min; | 23 | static int auto_flowlabels_min; |
@@ -106,6 +109,22 @@ static struct ctl_table ipv6_rotable[] = { | |||
106 | .proc_handler = proc_dointvec_minmax, | 109 | .proc_handler = proc_dointvec_minmax, |
107 | .extra1 = &one | 110 | .extra1 = &one |
108 | }, | 111 | }, |
112 | #ifdef CONFIG_NETLABEL | ||
113 | { | ||
114 | .procname = "calipso_cache_enable", | ||
115 | .data = &calipso_cache_enabled, | ||
116 | .maxlen = sizeof(int), | ||
117 | .mode = 0644, | ||
118 | .proc_handler = proc_dointvec, | ||
119 | }, | ||
120 | { | ||
121 | .procname = "calipso_cache_bucket_size", | ||
122 | .data = &calipso_cache_bucketsize, | ||
123 | .maxlen = sizeof(int), | ||
124 | .mode = 0644, | ||
125 | .proc_handler = proc_dointvec, | ||
126 | }, | ||
127 | #endif /* CONFIG_NETLABEL */ | ||
109 | { } | 128 | { } |
110 | }; | 129 | }; |
111 | 130 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 37cf91323319..33df8b8575cc 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -443,6 +443,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
443 | { | 443 | { |
444 | struct inet_request_sock *ireq = inet_rsk(req); | 444 | struct inet_request_sock *ireq = inet_rsk(req); |
445 | struct ipv6_pinfo *np = inet6_sk(sk); | 445 | struct ipv6_pinfo *np = inet6_sk(sk); |
446 | struct ipv6_txoptions *opt; | ||
446 | struct flowi6 *fl6 = &fl->u.ip6; | 447 | struct flowi6 *fl6 = &fl->u.ip6; |
447 | struct sk_buff *skb; | 448 | struct sk_buff *skb; |
448 | int err = -ENOMEM; | 449 | int err = -ENOMEM; |
@@ -463,8 +464,10 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
463 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); | 464 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); |
464 | 465 | ||
465 | rcu_read_lock(); | 466 | rcu_read_lock(); |
466 | err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), | 467 | opt = ireq->ipv6_opt; |
467 | np->tclass); | 468 | if (!opt) |
469 | opt = rcu_dereference(np->opt); | ||
470 | err = ip6_xmit(sk, skb, fl6, opt, np->tclass); | ||
468 | rcu_read_unlock(); | 471 | rcu_read_unlock(); |
469 | err = net_xmit_eval(err); | 472 | err = net_xmit_eval(err); |
470 | } | 473 | } |
@@ -476,6 +479,7 @@ done: | |||
476 | 479 | ||
477 | static void tcp_v6_reqsk_destructor(struct request_sock *req) | 480 | static void tcp_v6_reqsk_destructor(struct request_sock *req) |
478 | { | 481 | { |
482 | kfree(inet_rsk(req)->ipv6_opt); | ||
479 | kfree_skb(inet_rsk(req)->pktopts); | 483 | kfree_skb(inet_rsk(req)->pktopts); |
480 | } | 484 | } |
481 | 485 | ||
@@ -1112,7 +1116,9 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1112 | but we make one more one thing there: reattach optmem | 1116 | but we make one more one thing there: reattach optmem |
1113 | to newsk. | 1117 | to newsk. |
1114 | */ | 1118 | */ |
1115 | opt = rcu_dereference(np->opt); | 1119 | opt = ireq->ipv6_opt; |
1120 | if (!opt) | ||
1121 | opt = rcu_dereference(np->opt); | ||
1116 | if (opt) { | 1122 | if (opt) { |
1117 | opt = ipv6_dup_options(newsk, opt); | 1123 | opt = ipv6_dup_options(newsk, opt); |
1118 | RCU_INIT_POINTER(newnp->opt, opt); | 1124 | RCU_INIT_POINTER(newnp->opt, opt); |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 37d674e6f8a9..02b45a8e8b35 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/skbuff.h> | 22 | #include <linux/skbuff.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
25 | #include <linux/security.h> | ||
25 | #include <net/sock.h> | 26 | #include <net/sock.h> |
26 | #include <asm/ebcdic.h> | 27 | #include <asm/ebcdic.h> |
27 | #include <asm/cpcmd.h> | 28 | #include <asm/cpcmd.h> |
@@ -530,8 +531,10 @@ static void iucv_sock_close(struct sock *sk) | |||
530 | 531 | ||
531 | static void iucv_sock_init(struct sock *sk, struct sock *parent) | 532 | static void iucv_sock_init(struct sock *sk, struct sock *parent) |
532 | { | 533 | { |
533 | if (parent) | 534 | if (parent) { |
534 | sk->sk_type = parent->sk_type; | 535 | sk->sk_type = parent->sk_type; |
536 | security_sk_clone(parent, sk); | ||
537 | } | ||
535 | } | 538 | } |
536 | 539 | ||
537 | static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern) | 540 | static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern) |
diff --git a/net/netlabel/Kconfig b/net/netlabel/Kconfig index 56958c85f2b4..d9eaa30ffe3f 100644 --- a/net/netlabel/Kconfig +++ b/net/netlabel/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config NETLABEL | 5 | config NETLABEL |
6 | bool "NetLabel subsystem support" | 6 | bool "NetLabel subsystem support" |
7 | depends on SECURITY | 7 | depends on SECURITY |
8 | select CRC_CCITT if IPV6 | ||
8 | default n | 9 | default n |
9 | ---help--- | 10 | ---help--- |
10 | NetLabel provides support for explicit network packet labeling | 11 | NetLabel provides support for explicit network packet labeling |
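The new select is needed because the CALIPSO option defined by RFC 5570 carries a CRC-16 checksum that the IPv6 engine computes with the kernel's CRC-CCITT helper. A rough sketch of that dependency, assuming kernel context; the seed value and the expectation that the checksum field is pre-zeroed are assumptions here, the real routine lives in net/ipv6/calipso.c:

#include <linux/types.h>
#include <linux/crc-ccitt.h>

static u16 calipso_crc_sketch(const u8 *opt, size_t len)
{
	/* @opt is the CALIPSO option with its checksum field zeroed */
	return crc_ccitt(0xffff, opt, len);
}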
diff --git a/net/netlabel/Makefile b/net/netlabel/Makefile index d2732fc952e2..d341ede0dca5 100644 --- a/net/netlabel/Makefile +++ b/net/netlabel/Makefile | |||
@@ -12,4 +12,4 @@ obj-y += netlabel_mgmt.o | |||
12 | # protocol modules | 12 | # protocol modules |
13 | obj-y += netlabel_unlabeled.o | 13 | obj-y += netlabel_unlabeled.o |
14 | obj-y += netlabel_cipso_v4.o | 14 | obj-y += netlabel_cipso_v4.o |
15 | 15 | obj-$(subst m,y,$(CONFIG_IPV6)) += netlabel_calipso.o | |
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c new file mode 100644 index 000000000000..2ec93c5e77bb --- /dev/null +++ b/net/netlabel/netlabel_calipso.c | |||
@@ -0,0 +1,740 @@ | |||
1 | /* | ||
2 | * NetLabel CALIPSO/IPv6 Support | ||
3 | * | ||
4 | * This file defines the CALIPSO/IPv6 functions for the NetLabel system. The | ||
5 | * NetLabel system manages static and dynamic label mappings for network | ||
6 | * protocols such as CIPSO and CALIPSO. | ||
7 | * | ||
8 | * Authors: Paul Moore <paul@paul-moore.com> | ||
9 | * Huw Davies <huw@codeweavers.com> | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | /* (c) Copyright Hewlett-Packard Development Company, L.P., 2006 | ||
14 | * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015 | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License as published by | ||
18 | * the Free Software Foundation; either version 2 of the License, or | ||
19 | * (at your option) any later version. | ||
20 | * | ||
21 | * This program is distributed in the hope that it will be useful, | ||
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
24 | * the GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #include <linux/types.h> | ||
32 | #include <linux/socket.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/audit.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <net/sock.h> | ||
38 | #include <net/netlink.h> | ||
39 | #include <net/genetlink.h> | ||
40 | #include <net/netlabel.h> | ||
41 | #include <net/calipso.h> | ||
42 | #include <linux/atomic.h> | ||
43 | |||
44 | #include "netlabel_user.h" | ||
45 | #include "netlabel_calipso.h" | ||
46 | #include "netlabel_mgmt.h" | ||
47 | #include "netlabel_domainhash.h" | ||
48 | |||
49 | /* Argument struct for calipso_doi_walk() */ | ||
50 | struct netlbl_calipso_doiwalk_arg { | ||
51 | struct netlink_callback *nl_cb; | ||
52 | struct sk_buff *skb; | ||
53 | u32 seq; | ||
54 | }; | ||
55 | |||
56 | /* Argument struct for netlbl_domhsh_walk() */ | ||
57 | struct netlbl_domhsh_walk_arg { | ||
58 | struct netlbl_audit *audit_info; | ||
59 | u32 doi; | ||
60 | }; | ||
61 | |||
62 | /* NetLabel Generic NETLINK CALIPSO family */ | ||
63 | static struct genl_family netlbl_calipso_gnl_family = { | ||
64 | .id = GENL_ID_GENERATE, | ||
65 | .hdrsize = 0, | ||
66 | .name = NETLBL_NLTYPE_CALIPSO_NAME, | ||
67 | .version = NETLBL_PROTO_VERSION, | ||
68 | .maxattr = NLBL_CALIPSO_A_MAX, | ||
69 | }; | ||
70 | |||
71 | /* NetLabel Netlink attribute policy */ | ||
72 | static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = { | ||
73 | [NLBL_CALIPSO_A_DOI] = { .type = NLA_U32 }, | ||
74 | [NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 }, | ||
75 | }; | ||
76 | |||
77 | /* NetLabel Command Handlers | ||
78 | */ | ||
79 | /** | ||
80 | * netlbl_calipso_add_pass - Adds a CALIPSO pass DOI definition | ||
81 | * @info: the Generic NETLINK info block | ||
82 | * @audit_info: NetLabel audit information | ||
83 | * | ||
84 | * Description: | ||
85 | * Create a new CALIPSO_MAP_PASS DOI definition based on the given ADD message | ||
86 | * and add it to the CALIPSO engine. Return zero on success and non-zero on | ||
87 | * error. | ||
88 | * | ||
89 | */ | ||
90 | static int netlbl_calipso_add_pass(struct genl_info *info, | ||
91 | struct netlbl_audit *audit_info) | ||
92 | { | ||
93 | int ret_val; | ||
94 | struct calipso_doi *doi_def = NULL; | ||
95 | |||
96 | doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); | ||
97 | if (!doi_def) | ||
98 | return -ENOMEM; | ||
99 | doi_def->type = CALIPSO_MAP_PASS; | ||
100 | doi_def->doi = nla_get_u32(info->attrs[NLBL_CALIPSO_A_DOI]); | ||
101 | ret_val = calipso_doi_add(doi_def, audit_info); | ||
102 | if (ret_val != 0) | ||
103 | calipso_doi_free(doi_def); | ||
104 | |||
105 | return ret_val; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * netlbl_calipso_add - Handle an ADD message | ||
110 | * @skb: the NETLINK buffer | ||
111 | * @info: the Generic NETLINK info block | ||
112 | * | ||
113 | * Description: | ||
114 | * Create a new DOI definition based on the given ADD message and add it to the | ||
115 | * CALIPSO engine. Returns zero on success, negative values on failure. | ||
116 | * | ||
117 | */ | ||
118 | static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info) | ||
119 | |||
120 | { | ||
121 | int ret_val = -EINVAL; | ||
122 | struct netlbl_audit audit_info; | ||
123 | |||
124 | if (!info->attrs[NLBL_CALIPSO_A_DOI] || | ||
125 | !info->attrs[NLBL_CALIPSO_A_MTYPE]) | ||
126 | return -EINVAL; | ||
127 | |||
128 | netlbl_netlink_auditinfo(skb, &audit_info); | ||
129 | switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) { | ||
130 | case CALIPSO_MAP_PASS: | ||
131 | ret_val = netlbl_calipso_add_pass(info, &audit_info); | ||
132 | break; | ||
133 | } | ||
134 | if (ret_val == 0) | ||
135 | atomic_inc(&netlabel_mgmt_protocount); | ||
136 | |||
137 | return ret_val; | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * netlbl_calipso_list - Handle a LIST message | ||
142 | * @skb: the NETLINK buffer | ||
143 | * @info: the Generic NETLINK info block | ||
144 | * | ||
145 | * Description: | ||
146 | * Process a user generated LIST message and respond accordingly. | ||
147 | * Returns zero on success and negative values on error. | ||
148 | * | ||
149 | */ | ||
150 | static int netlbl_calipso_list(struct sk_buff *skb, struct genl_info *info) | ||
151 | { | ||
152 | int ret_val; | ||
153 | struct sk_buff *ans_skb = NULL; | ||
154 | void *data; | ||
155 | u32 doi; | ||
156 | struct calipso_doi *doi_def; | ||
157 | |||
158 | if (!info->attrs[NLBL_CALIPSO_A_DOI]) { | ||
159 | ret_val = -EINVAL; | ||
160 | goto list_failure; | ||
161 | } | ||
162 | |||
163 | doi = nla_get_u32(info->attrs[NLBL_CALIPSO_A_DOI]); | ||
164 | |||
165 | doi_def = calipso_doi_getdef(doi); | ||
166 | if (!doi_def) { | ||
167 | ret_val = -EINVAL; | ||
168 | goto list_failure; | ||
169 | } | ||
170 | |||
171 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
172 | if (!ans_skb) { | ||
173 | ret_val = -ENOMEM; | ||
174 | goto list_failure_put; | ||
175 | } | ||
176 | data = genlmsg_put_reply(ans_skb, info, &netlbl_calipso_gnl_family, | ||
177 | 0, NLBL_CALIPSO_C_LIST); | ||
178 | if (!data) { | ||
179 | ret_val = -ENOMEM; | ||
180 | goto list_failure_put; | ||
181 | } | ||
182 | |||
183 | ret_val = nla_put_u32(ans_skb, NLBL_CALIPSO_A_MTYPE, doi_def->type); | ||
184 | if (ret_val != 0) | ||
185 | goto list_failure_put; | ||
186 | |||
187 | calipso_doi_putdef(doi_def); | ||
188 | |||
189 | genlmsg_end(ans_skb, data); | ||
190 | return genlmsg_reply(ans_skb, info); | ||
191 | |||
192 | list_failure_put: | ||
193 | calipso_doi_putdef(doi_def); | ||
194 | list_failure: | ||
195 | kfree_skb(ans_skb); | ||
196 | return ret_val; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * netlbl_calipso_listall_cb - calipso_doi_walk() callback for LISTALL | ||
201 | * @doi_def: the CALIPSO DOI definition | ||
202 | * @arg: the netlbl_calipso_doiwalk_arg structure | ||
203 | * | ||
204 | * Description: | ||
205 | * This function is designed to be used as a callback to the | ||
206 | * calipso_doi_walk() function for use in generating a response for a LISTALL | ||
207 | * message. Returns the size of the message on success, negative values on | ||
208 | * failure. | ||
209 | * | ||
210 | */ | ||
211 | static int netlbl_calipso_listall_cb(struct calipso_doi *doi_def, void *arg) | ||
212 | { | ||
213 | int ret_val = -ENOMEM; | ||
214 | struct netlbl_calipso_doiwalk_arg *cb_arg = arg; | ||
215 | void *data; | ||
216 | |||
217 | data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid, | ||
218 | cb_arg->seq, &netlbl_calipso_gnl_family, | ||
219 | NLM_F_MULTI, NLBL_CALIPSO_C_LISTALL); | ||
220 | if (!data) | ||
221 | goto listall_cb_failure; | ||
222 | |||
223 | ret_val = nla_put_u32(cb_arg->skb, NLBL_CALIPSO_A_DOI, doi_def->doi); | ||
224 | if (ret_val != 0) | ||
225 | goto listall_cb_failure; | ||
226 | ret_val = nla_put_u32(cb_arg->skb, | ||
227 | NLBL_CALIPSO_A_MTYPE, | ||
228 | doi_def->type); | ||
229 | if (ret_val != 0) | ||
230 | goto listall_cb_failure; | ||
231 | |||
232 | genlmsg_end(cb_arg->skb, data); | ||
233 | return 0; | ||
234 | |||
235 | listall_cb_failure: | ||
236 | genlmsg_cancel(cb_arg->skb, data); | ||
237 | return ret_val; | ||
238 | } | ||
239 | |||
240 | /** | ||
241 | * netlbl_calipso_listall - Handle a LISTALL message | ||
242 | * @skb: the NETLINK buffer | ||
243 | * @cb: the NETLINK callback | ||
244 | * | ||
245 | * Description: | ||
246 | * Process a user generated LISTALL message and respond accordingly. Returns | ||
247 | * zero on success and negative values on error. | ||
248 | * | ||
249 | */ | ||
250 | static int netlbl_calipso_listall(struct sk_buff *skb, | ||
251 | struct netlink_callback *cb) | ||
252 | { | ||
253 | struct netlbl_calipso_doiwalk_arg cb_arg; | ||
254 | u32 doi_skip = cb->args[0]; | ||
255 | |||
256 | cb_arg.nl_cb = cb; | ||
257 | cb_arg.skb = skb; | ||
258 | cb_arg.seq = cb->nlh->nlmsg_seq; | ||
259 | |||
260 | calipso_doi_walk(&doi_skip, netlbl_calipso_listall_cb, &cb_arg); | ||
261 | |||
262 | cb->args[0] = doi_skip; | ||
263 | return skb->len; | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * netlbl_calipso_remove_cb - netlbl_calipso_remove() callback for REMOVE | ||
268 | * @entry: LSM domain mapping entry | ||
269 | * @arg: the netlbl_domhsh_walk_arg structure | ||
270 | * | ||
271 | * Description: | ||
272 | * This function is intended for use by netlbl_calipso_remove() as the callback | ||
273 | * for the netlbl_domhsh_walk() function; it removes LSM domain map entries | ||
274 | * which are associated with the CALIPSO DOI specified in @arg. Returns zero on | ||
275 | * success, negative values on failure. | ||
276 | * | ||
277 | */ | ||
278 | static int netlbl_calipso_remove_cb(struct netlbl_dom_map *entry, void *arg) | ||
279 | { | ||
280 | struct netlbl_domhsh_walk_arg *cb_arg = arg; | ||
281 | |||
282 | if (entry->def.type == NETLBL_NLTYPE_CALIPSO && | ||
283 | entry->def.calipso->doi == cb_arg->doi) | ||
284 | return netlbl_domhsh_remove_entry(entry, cb_arg->audit_info); | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * netlbl_calipso_remove - Handle a REMOVE message | ||
291 | * @skb: the NETLINK buffer | ||
292 | * @info: the Generic NETLINK info block | ||
293 | * | ||
294 | * Description: | ||
295 | * Process a user generated REMOVE message and respond accordingly. Returns | ||
296 | * zero on success, negative values on failure. | ||
297 | * | ||
298 | */ | ||
299 | static int netlbl_calipso_remove(struct sk_buff *skb, struct genl_info *info) | ||
300 | { | ||
301 | int ret_val = -EINVAL; | ||
302 | struct netlbl_domhsh_walk_arg cb_arg; | ||
303 | struct netlbl_audit audit_info; | ||
304 | u32 skip_bkt = 0; | ||
305 | u32 skip_chain = 0; | ||
306 | |||
307 | if (!info->attrs[NLBL_CALIPSO_A_DOI]) | ||
308 | return -EINVAL; | ||
309 | |||
310 | netlbl_netlink_auditinfo(skb, &audit_info); | ||
311 | cb_arg.doi = nla_get_u32(info->attrs[NLBL_CALIPSO_A_DOI]); | ||
312 | cb_arg.audit_info = &audit_info; | ||
313 | ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain, | ||
314 | netlbl_calipso_remove_cb, &cb_arg); | ||
315 | if (ret_val == 0 || ret_val == -ENOENT) { | ||
316 | ret_val = calipso_doi_remove(cb_arg.doi, &audit_info); | ||
317 | if (ret_val == 0) | ||
318 | atomic_dec(&netlabel_mgmt_protocount); | ||
319 | } | ||
320 | |||
321 | return ret_val; | ||
322 | } | ||
323 | |||
324 | /* NetLabel Generic NETLINK Command Definitions | ||
325 | */ | ||
326 | |||
327 | static const struct genl_ops netlbl_calipso_ops[] = { | ||
328 | { | ||
329 | .cmd = NLBL_CALIPSO_C_ADD, | ||
330 | .flags = GENL_ADMIN_PERM, | ||
331 | .policy = calipso_genl_policy, | ||
332 | .doit = netlbl_calipso_add, | ||
333 | .dumpit = NULL, | ||
334 | }, | ||
335 | { | ||
336 | .cmd = NLBL_CALIPSO_C_REMOVE, | ||
337 | .flags = GENL_ADMIN_PERM, | ||
338 | .policy = calipso_genl_policy, | ||
339 | .doit = netlbl_calipso_remove, | ||
340 | .dumpit = NULL, | ||
341 | }, | ||
342 | { | ||
343 | .cmd = NLBL_CALIPSO_C_LIST, | ||
344 | .flags = 0, | ||
345 | .policy = calipso_genl_policy, | ||
346 | .doit = netlbl_calipso_list, | ||
347 | .dumpit = NULL, | ||
348 | }, | ||
349 | { | ||
350 | .cmd = NLBL_CALIPSO_C_LISTALL, | ||
351 | .flags = 0, | ||
352 | .policy = calipso_genl_policy, | ||
353 | .doit = NULL, | ||
354 | .dumpit = netlbl_calipso_listall, | ||
355 | }, | ||
356 | }; | ||
357 | |||
358 | /* NetLabel Generic NETLINK Protocol Functions | ||
359 | */ | ||
360 | |||
361 | /** | ||
362 | * netlbl_calipso_genl_init - Register the CALIPSO NetLabel component | ||
363 | * | ||
364 | * Description: | ||
365 | * Register the CALIPSO packet NetLabel component with the Generic NETLINK | ||
366 | * mechanism. Returns zero on success, negative values on failure. | ||
367 | * | ||
368 | */ | ||
369 | int __init netlbl_calipso_genl_init(void) | ||
370 | { | ||
371 | return genl_register_family_with_ops(&netlbl_calipso_gnl_family, | ||
372 | netlbl_calipso_ops); | ||
373 | } | ||
374 | |||
375 | static const struct netlbl_calipso_ops *calipso_ops; | ||
376 | |||
377 | /** | ||
378 | * netlbl_calipso_ops_register - Register the CALIPSO operations | ||
379 | * | ||
380 | * Description: | ||
381 | * Register the CALIPSO packet engine operations. | ||
382 | * | ||
383 | */ | ||
384 | const struct netlbl_calipso_ops * | ||
385 | netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops) | ||
386 | { | ||
387 | return xchg(&calipso_ops, ops); | ||
388 | } | ||
389 | EXPORT_SYMBOL(netlbl_calipso_ops_register); | ||
390 | |||
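netlbl_calipso_ops_register() is the hook point through which the actual CALIPSO packet engine (added under net/ipv6 elsewhere in this series) plugs its implementation into the wrappers that follow. A rough sketch of such a registration, with hypothetical function names and most hooks elided:

static const struct netlbl_calipso_ops example_calipso_ops = {
	.doi_add	= example_doi_add,	/* hypothetical implementations */
	.doi_free	= example_doi_free,
	.doi_remove	= example_doi_remove,
	/* ... remaining hooks (doi_getdef, sock_setattr, skbuff_optptr, ...) ... */
};

static int __init example_calipso_init(void)
{
	/* returns any previously registered ops table, normally NULL */
	netlbl_calipso_ops_register(&example_calipso_ops);
	return 0;
}

In practice every hook has to be populated, since the wrappers below only check that an ops table is registered, not the individual pointers.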
391 | static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void) | ||
392 | { | ||
393 | return ACCESS_ONCE(calipso_ops); | ||
394 | } | ||
395 | |||
396 | /** | ||
397 | * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine | ||
398 | * @doi_def: the DOI structure | ||
399 | * @audit_info: NetLabel audit information | ||
400 | * | ||
401 | * Description: | ||
402 | * The caller defines a new DOI for use by the CALIPSO engine and calls this | ||
403 | * function to add it to the list of acceptable domains. The caller must | ||
404 | * ensure that the mapping table specified in @doi_def->map meets all of the | ||
405 | * requirements of the mapping type (see calipso.h for details). Returns | ||
406 | * zero on success and non-zero on failure. | ||
407 | * | ||
408 | */ | ||
409 | int calipso_doi_add(struct calipso_doi *doi_def, | ||
410 | struct netlbl_audit *audit_info) | ||
411 | { | ||
412 | int ret_val = -ENOMSG; | ||
413 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
414 | |||
415 | if (ops) | ||
416 | ret_val = ops->doi_add(doi_def, audit_info); | ||
417 | return ret_val; | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * calipso_doi_free - Frees a DOI definition | ||
422 | * @doi_def: the DOI definition | ||
423 | * | ||
424 | * Description: | ||
425 | * This function frees all of the memory associated with a DOI definition. | ||
426 | * | ||
427 | */ | ||
428 | void calipso_doi_free(struct calipso_doi *doi_def) | ||
429 | { | ||
430 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
431 | |||
432 | if (ops) | ||
433 | ops->doi_free(doi_def); | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * calipso_doi_remove - Remove an existing DOI from the CALIPSO protocol engine | ||
438 | * @doi: the DOI value | ||
439 | * @audit_secid: the LSM secid to use in the audit message | ||
440 | * | ||
441 | * Description: | ||
442 | * Removes a DOI definition from the CALIPSO engine. The NetLabel routines will | ||
443 | * be called to release their own LSM domain mappings as well as our own | ||
444 | * domain list. Returns zero on success and negative values on failure. | ||
445 | * | ||
446 | */ | ||
447 | int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info) | ||
448 | { | ||
449 | int ret_val = -ENOMSG; | ||
450 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
451 | |||
452 | if (ops) | ||
453 | ret_val = ops->doi_remove(doi, audit_info); | ||
454 | return ret_val; | ||
455 | } | ||
456 | |||
457 | /** | ||
458 | * calipso_doi_getdef - Returns a reference to a valid DOI definition | ||
459 | * @doi: the DOI value | ||
460 | * | ||
461 | * Description: | ||
462 | * Searches for a valid DOI definition and if one is found it is returned to | ||
463 | * the caller. Otherwise NULL is returned. The caller must ensure that | ||
464 | * calipso_doi_putdef() is called when the caller is done. | ||
465 | * | ||
466 | */ | ||
467 | struct calipso_doi *calipso_doi_getdef(u32 doi) | ||
468 | { | ||
469 | struct calipso_doi *ret_val = NULL; | ||
470 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
471 | |||
472 | if (ops) | ||
473 | ret_val = ops->doi_getdef(doi); | ||
474 | return ret_val; | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * calipso_doi_putdef - Releases a reference for the given DOI definition | ||
479 | * @doi_def: the DOI definition | ||
480 | * | ||
481 | * Description: | ||
482 | * Releases a DOI definition reference obtained from calipso_doi_getdef(). | ||
483 | * | ||
484 | */ | ||
485 | void calipso_doi_putdef(struct calipso_doi *doi_def) | ||
486 | { | ||
487 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
488 | |||
489 | if (ops) | ||
490 | ops->doi_putdef(doi_def); | ||
491 | } | ||
492 | |||
493 | /** | ||
494 | * calipso_doi_walk - Iterate through the DOI definitions | ||
495 | * @skip_cnt: skip past this number of DOI definitions, updated | ||
496 | * @callback: callback for each DOI definition | ||
497 | * @cb_arg: argument for the callback function | ||
498 | * | ||
499 | * Description: | ||
500 | * Iterate over the DOI definition list, skipping the first @skip_cnt entries. | ||
501 | * For each entry call @callback, if @callback returns a negative value stop | ||
502 | * 'walking' through the list and return. Updates the value in @skip_cnt upon | ||
503 | * return. Returns zero on success, negative values on failure. | ||
504 | * | ||
505 | */ | ||
506 | int calipso_doi_walk(u32 *skip_cnt, | ||
507 | int (*callback)(struct calipso_doi *doi_def, void *arg), | ||
508 | void *cb_arg) | ||
509 | { | ||
510 | int ret_val = -ENOMSG; | ||
511 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
512 | |||
513 | if (ops) | ||
514 | ret_val = ops->doi_walk(skip_cnt, callback, cb_arg); | ||
515 | return ret_val; | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * calipso_sock_getattr - Get the security attributes from a sock | ||
520 | * @sk: the sock | ||
521 | * @secattr: the security attributes | ||
522 | * | ||
523 | * Description: | ||
524 | * Query @sk to see if there is a CALIPSO option attached to the sock and if | ||
525 | * there is return the CALIPSO security attributes in @secattr. This function | ||
526 | * requires that @sk be locked, or privately held, but it does not do any | ||
527 | * locking itself. Returns zero on success and negative values on failure. | ||
528 | * | ||
529 | */ | ||
530 | int calipso_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) | ||
531 | { | ||
532 | int ret_val = -ENOMSG; | ||
533 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
534 | |||
535 | if (ops) | ||
536 | ret_val = ops->sock_getattr(sk, secattr); | ||
537 | return ret_val; | ||
538 | } | ||
539 | |||
540 | /** | ||
541 | * calipso_sock_setattr - Add a CALIPSO option to a socket | ||
542 | * @sk: the socket | ||
543 | * @doi_def: the CALIPSO DOI to use | ||
544 | * @secattr: the specific security attributes of the socket | ||
545 | * | ||
546 | * Description: | ||
547 | * Set the CALIPSO option on the given socket using the DOI definition and | ||
548 | * security attributes passed to the function. This function requires | ||
549 | * exclusive access to @sk, which means it either needs to be in the | ||
550 | * process of being created or locked. Returns zero on success and negative | ||
551 | * values on failure. | ||
552 | * | ||
553 | */ | ||
554 | int calipso_sock_setattr(struct sock *sk, | ||
555 | const struct calipso_doi *doi_def, | ||
556 | const struct netlbl_lsm_secattr *secattr) | ||
557 | { | ||
558 | int ret_val = -ENOMSG; | ||
559 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
560 | |||
561 | if (ops) | ||
562 | ret_val = ops->sock_setattr(sk, doi_def, secattr); | ||
563 | return ret_val; | ||
564 | } | ||
565 | |||
566 | /** | ||
567 | * calipso_sock_delattr - Delete the CALIPSO option from a socket | ||
568 | * @sk: the socket | ||
569 | * | ||
570 | * Description: | ||
571 | * Removes the CALIPSO option from a socket, if present. | ||
572 | * | ||
573 | */ | ||
574 | void calipso_sock_delattr(struct sock *sk) | ||
575 | { | ||
576 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
577 | |||
578 | if (ops) | ||
579 | ops->sock_delattr(sk); | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * calipso_req_setattr - Add a CALIPSO option to a connection request socket | ||
584 | * @req: the connection request socket | ||
585 | * @doi_def: the CALIPSO DOI to use | ||
586 | * @secattr: the specific security attributes of the socket | ||
587 | * | ||
588 | * Description: | ||
589 | * Set the CALIPSO option on the given socket using the DOI definition and | ||
590 | * security attributes passed to the function. Returns zero on success and | ||
591 | * negative values on failure. | ||
592 | * | ||
593 | */ | ||
594 | int calipso_req_setattr(struct request_sock *req, | ||
595 | const struct calipso_doi *doi_def, | ||
596 | const struct netlbl_lsm_secattr *secattr) | ||
597 | { | ||
598 | int ret_val = -ENOMSG; | ||
599 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
600 | |||
601 | if (ops) | ||
602 | ret_val = ops->req_setattr(req, doi_def, secattr); | ||
603 | return ret_val; | ||
604 | } | ||
605 | |||
606 | /** | ||
607 | * calipso_req_delattr - Delete the CALIPSO option from a request socket | ||
608 | * @req: the request socket | ||
609 | * | ||
610 | * Description: | ||
611 | * Removes the CALIPSO option from a request socket, if present. | ||
612 | * | ||
613 | */ | ||
614 | void calipso_req_delattr(struct request_sock *req) | ||
615 | { | ||
616 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
617 | |||
618 | if (ops) | ||
619 | ops->req_delattr(req); | ||
620 | } | ||
621 | |||
622 | /** | ||
623 | * calipso_optptr - Find the CALIPSO option in the packet | ||
624 | * @skb: the packet | ||
625 | * | ||
626 | * Description: | ||
627 | * Parse the packet's IP header looking for a CALIPSO option. Returns a pointer | ||
628 | * to the start of the CALIPSO option on success, NULL if one is not found. | ||
629 | * | ||
630 | */ | ||
631 | unsigned char *calipso_optptr(const struct sk_buff *skb) | ||
632 | { | ||
633 | unsigned char *ret_val = NULL; | ||
634 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
635 | |||
636 | if (ops) | ||
637 | ret_val = ops->skbuff_optptr(skb); | ||
638 | return ret_val; | ||
639 | } | ||
640 | |||
641 | /** | ||
642 | * calipso_getattr - Get the security attributes from a memory block. | ||
643 | * @calipso: the CALIPSO option | ||
644 | * @secattr: the security attributes | ||
645 | * | ||
646 | * Description: | ||
647 | * Inspect @calipso and return the security attributes in @secattr. | ||
648 | * Returns zero on success and negative values on failure. | ||
649 | * | ||
650 | */ | ||
651 | int calipso_getattr(const unsigned char *calipso, | ||
652 | struct netlbl_lsm_secattr *secattr) | ||
653 | { | ||
654 | int ret_val = -ENOMSG; | ||
655 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
656 | |||
657 | if (ops) | ||
658 | ret_val = ops->opt_getattr(calipso, secattr); | ||
659 | return ret_val; | ||
660 | } | ||
661 | |||
662 | /** | ||
663 | * calipso_skbuff_setattr - Set the CALIPSO option on a packet | ||
664 | * @skb: the packet | ||
665 | * @doi_def: the CALIPSO DOI to use | ||
666 | * @secattr: the security attributes | ||
667 | * | ||
668 | * Description: | ||
669 | * Set the CALIPSO option on the given packet based on the security attributes. | ||
670 | * Returns zero on success and negative values on failure. | ||
671 | * | ||
672 | */ | ||
673 | int calipso_skbuff_setattr(struct sk_buff *skb, | ||
674 | const struct calipso_doi *doi_def, | ||
675 | const struct netlbl_lsm_secattr *secattr) | ||
676 | { | ||
677 | int ret_val = -ENOMSG; | ||
678 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
679 | |||
680 | if (ops) | ||
681 | ret_val = ops->skbuff_setattr(skb, doi_def, secattr); | ||
682 | return ret_val; | ||
683 | } | ||
684 | |||
685 | /** | ||
686 | * calipso_skbuff_delattr - Delete any CALIPSO options from a packet | ||
687 | * @skb: the packet | ||
688 | * | ||
689 | * Description: | ||
690 | * Removes any and all CALIPSO options from the given packet. Returns zero on | ||
691 | * success, negative values on failure. | ||
692 | * | ||
693 | */ | ||
694 | int calipso_skbuff_delattr(struct sk_buff *skb) | ||
695 | { | ||
696 | int ret_val = -ENOMSG; | ||
697 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
698 | |||
699 | if (ops) | ||
700 | ret_val = ops->skbuff_delattr(skb); | ||
701 | return ret_val; | ||
702 | } | ||
703 | |||
704 | /** | ||
705 | * calipso_cache_invalidate - Invalidates the current CALIPSO cache | ||
706 | * | ||
707 | * Description: | ||
708 | * Invalidates and frees any entries in the CALIPSO cache. | ||
709 | * | ||
710 | * | ||
711 | */ | ||
712 | void calipso_cache_invalidate(void) | ||
713 | { | ||
714 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
715 | |||
716 | if (ops) | ||
717 | ops->cache_invalidate(); | ||
718 | } | ||
719 | |||
720 | /** | ||
721 | * calipso_cache_add - Add an entry to the CALIPSO cache | ||
722 | * @calipso_ptr: the CALIPSO option | ||
723 | * @secattr: the packet's security attributes | ||
724 | * | ||
725 | * Description: | ||
726 | * Add a new entry into the CALIPSO label mapping cache. | ||
727 | * Returns zero on success, negative values on failure. | ||
728 | * | ||
729 | */ | ||
730 | int calipso_cache_add(const unsigned char *calipso_ptr, | ||
731 | const struct netlbl_lsm_secattr *secattr) | ||
732 | |||
733 | { | ||
734 | int ret_val = -ENOMSG; | ||
735 | const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get(); | ||
736 | |||
737 | if (ops) | ||
738 | ret_val = ops->cache_add(calipso_ptr, secattr); | ||
739 | return ret_val; | ||
740 | } | ||
diff --git a/net/netlabel/netlabel_calipso.h b/net/netlabel/netlabel_calipso.h new file mode 100644 index 000000000000..9fd291cd0fc5 --- /dev/null +++ b/net/netlabel/netlabel_calipso.h | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * NetLabel CALIPSO Support | ||
3 | * | ||
4 | * This file defines the CALIPSO functions for the NetLabel system. The | ||
5 | * NetLabel system manages static and dynamic label mappings for network | ||
6 | * protocols such as CIPSO and CALIPSO. | ||
7 | * | ||
8 | * Authors: Paul Moore <paul@paul-moore.com> | ||
9 | * Huw Davies <huw@codeweavers.com> | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | /* (c) Copyright Hewlett-Packard Development Company, L.P., 2006 | ||
14 | * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015 | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License as published by | ||
18 | * the Free Software Foundation; either version 2 of the License, or | ||
19 | * (at your option) any later version. | ||
20 | * | ||
21 | * This program is distributed in the hope that it will be useful, | ||
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
24 | * the GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #ifndef _NETLABEL_CALIPSO | ||
32 | #define _NETLABEL_CALIPSO | ||
33 | |||
34 | #include <net/netlabel.h> | ||
35 | #include <net/calipso.h> | ||
36 | |||
37 | /* The following NetLabel payloads are supported by the CALIPSO subsystem. | ||
38 | * | ||
39 | * o ADD: | ||
40 | * Sent by an application to add a new DOI mapping table. | ||
41 | * | ||
42 | * Required attributes: | ||
43 | * | ||
44 | * NLBL_CALIPSO_A_DOI | ||
45 | * NLBL_CALIPSO_A_MTYPE | ||
46 | * | ||
47 | * If using CALIPSO_MAP_PASS no additional attributes are required. | ||
48 | * | ||
49 | * o REMOVE: | ||
50 | * Sent by an application to remove a specific DOI mapping table from the | ||
51 | * CALIPSO system. | ||
52 | * | ||
53 | * Required attributes: | ||
54 | * | ||
55 | * NLBL_CALIPSO_A_DOI | ||
56 | * | ||
57 | * o LIST: | ||
58 | * Sent by an application to list the details of a DOI definition. On | ||
59 | * success the kernel should send a response using the following format. | ||
60 | * | ||
61 | * Required attributes: | ||
62 | * | ||
63 | * NLBL_CALIPSO_A_DOI | ||
64 | * | ||
65 | * The valid response message format depends on the type of the DOI mapping, | ||
66 | * the defined formats are shown below. | ||
67 | * | ||
68 | * Required attributes: | ||
69 | * | ||
70 | * NLBL_CALIPSO_A_MTYPE | ||
71 | * | ||
72 | * If using CALIPSO_MAP_PASS no additional attributes are required. | ||
73 | * | ||
74 | * o LISTALL: | ||
75 | * This message is sent by an application to list the valid DOIs on the | ||
76 | * system. When sent by an application there is no payload and the | ||
77 | * NLM_F_DUMP flag should be set. The kernel should respond with a series of | ||
78 | * the following messages. | ||
79 | * | ||
80 | * Required attributes: | ||
81 | * | ||
82 | * NLBL_CALIPSO_A_DOI | ||
83 | * NLBL_CALIPSO_A_MTYPE | ||
84 | * | ||
85 | */ | ||
86 | |||
87 | /* NetLabel CALIPSO commands */ | ||
88 | enum { | ||
89 | NLBL_CALIPSO_C_UNSPEC, | ||
90 | NLBL_CALIPSO_C_ADD, | ||
91 | NLBL_CALIPSO_C_REMOVE, | ||
92 | NLBL_CALIPSO_C_LIST, | ||
93 | NLBL_CALIPSO_C_LISTALL, | ||
94 | __NLBL_CALIPSO_C_MAX, | ||
95 | }; | ||
96 | |||
97 | /* NetLabel CALIPSO attributes */ | ||
98 | enum { | ||
99 | NLBL_CALIPSO_A_UNSPEC, | ||
100 | NLBL_CALIPSO_A_DOI, | ||
101 | /* (NLA_U32) | ||
102 | * the DOI value */ | ||
103 | NLBL_CALIPSO_A_MTYPE, | ||
104 | /* (NLA_U32) | ||
105 | * the mapping table type (defined in the calipso.h header as | ||
106 | * CALIPSO_MAP_*) */ | ||
107 | __NLBL_CALIPSO_A_MAX, | ||
108 | }; | ||
109 | |||
110 | #define NLBL_CALIPSO_A_MAX (__NLBL_CALIPSO_A_MAX - 1) | ||
111 | |||
112 | /* NetLabel protocol functions */ | ||
113 | #if IS_ENABLED(CONFIG_IPV6) | ||
114 | int netlbl_calipso_genl_init(void); | ||
115 | #else | ||
116 | static inline int netlbl_calipso_genl_init(void) | ||
117 | { | ||
118 | return 0; | ||
119 | } | ||
120 | #endif | ||
121 | |||
122 | int calipso_doi_add(struct calipso_doi *doi_def, | ||
123 | struct netlbl_audit *audit_info); | ||
124 | void calipso_doi_free(struct calipso_doi *doi_def); | ||
125 | int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info); | ||
126 | struct calipso_doi *calipso_doi_getdef(u32 doi); | ||
127 | void calipso_doi_putdef(struct calipso_doi *doi_def); | ||
128 | int calipso_doi_walk(u32 *skip_cnt, | ||
129 | int (*callback)(struct calipso_doi *doi_def, void *arg), | ||
130 | void *cb_arg); | ||
131 | int calipso_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr); | ||
132 | int calipso_sock_setattr(struct sock *sk, | ||
133 | const struct calipso_doi *doi_def, | ||
134 | const struct netlbl_lsm_secattr *secattr); | ||
135 | void calipso_sock_delattr(struct sock *sk); | ||
136 | int calipso_req_setattr(struct request_sock *req, | ||
137 | const struct calipso_doi *doi_def, | ||
138 | const struct netlbl_lsm_secattr *secattr); | ||
139 | void calipso_req_delattr(struct request_sock *req); | ||
140 | unsigned char *calipso_optptr(const struct sk_buff *skb); | ||
141 | int calipso_getattr(const unsigned char *calipso, | ||
142 | struct netlbl_lsm_secattr *secattr); | ||
143 | int calipso_skbuff_setattr(struct sk_buff *skb, | ||
144 | const struct calipso_doi *doi_def, | ||
145 | const struct netlbl_lsm_secattr *secattr); | ||
146 | int calipso_skbuff_delattr(struct sk_buff *skb); | ||
147 | void calipso_cache_invalidate(void); | ||
148 | int calipso_cache_add(const unsigned char *calipso_ptr, | ||
149 | const struct netlbl_lsm_secattr *secattr); | ||
150 | |||
151 | #endif | ||
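The comment block at the top of this header is effectively the userspace contract for the CALIPSO generic netlink family. As a rough illustration only, a libnl-3 sketch that sends the ADD message for a CALIPSO_MAP_PASS DOI; the family name string, the CALIPSO_MAP_PASS value and the protocol version are hand-copied assumptions rather than values pulled from exported headers, and error handling is elided:

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#define NLBL_CALIPSO_C_ADD	1	/* from the command enum above */
#define NLBL_CALIPSO_A_DOI	1	/* from the attribute enum above */
#define NLBL_CALIPSO_A_MTYPE	2
#define CALIPSO_MAP_PASS	2	/* assumed value of the net/calipso.h constant */

static int calipso_add_pass_doi(uint32_t doi)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "NLBL_CALIPSO");	/* assumed family name */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NLBL_CALIPSO_C_ADD, 3 /* assumed protocol version */);
	nla_put_u32(msg, NLBL_CALIPSO_A_DOI, doi);
	nla_put_u32(msg, NLBL_CALIPSO_A_MTYPE, CALIPSO_MAP_PASS);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}

The REMOVE message is the same exchange with NLBL_CALIPSO_C_REMOVE and only the DOI attribute.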
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index ada67422234b..41d0e95d171e 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
@@ -37,10 +37,12 @@ | |||
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <net/netlabel.h> | 38 | #include <net/netlabel.h> |
39 | #include <net/cipso_ipv4.h> | 39 | #include <net/cipso_ipv4.h> |
40 | #include <net/calipso.h> | ||
40 | #include <asm/bug.h> | 41 | #include <asm/bug.h> |
41 | 42 | ||
42 | #include "netlabel_mgmt.h" | 43 | #include "netlabel_mgmt.h" |
43 | #include "netlabel_addrlist.h" | 44 | #include "netlabel_addrlist.h" |
45 | #include "netlabel_calipso.h" | ||
44 | #include "netlabel_domainhash.h" | 46 | #include "netlabel_domainhash.h" |
45 | #include "netlabel_user.h" | 47 | #include "netlabel_user.h" |
46 | 48 | ||
@@ -55,8 +57,9 @@ struct netlbl_domhsh_tbl { | |||
55 | static DEFINE_SPINLOCK(netlbl_domhsh_lock); | 57 | static DEFINE_SPINLOCK(netlbl_domhsh_lock); |
56 | #define netlbl_domhsh_rcu_deref(p) \ | 58 | #define netlbl_domhsh_rcu_deref(p) \ |
57 | rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock)) | 59 | rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock)) |
58 | static struct netlbl_domhsh_tbl *netlbl_domhsh; | 60 | static struct netlbl_domhsh_tbl __rcu *netlbl_domhsh; |
59 | static struct netlbl_dom_map *netlbl_domhsh_def; | 61 | static struct netlbl_dom_map __rcu *netlbl_domhsh_def_ipv4; |
62 | static struct netlbl_dom_map __rcu *netlbl_domhsh_def_ipv6; | ||
60 | 63 | ||
61 | /* | 64 | /* |
62 | * Domain Hash Table Helper Functions | 65 | * Domain Hash Table Helper Functions |
@@ -126,18 +129,26 @@ static u32 netlbl_domhsh_hash(const char *key) | |||
126 | return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1); | 129 | return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1); |
127 | } | 130 | } |
128 | 131 | ||
132 | static bool netlbl_family_match(u16 f1, u16 f2) | ||
133 | { | ||
134 | return (f1 == f2) || (f1 == AF_UNSPEC) || (f2 == AF_UNSPEC); | ||
135 | } | ||
136 | |||
129 | /** | 137 | /** |
130 | * netlbl_domhsh_search - Search for a domain entry | 138 | * netlbl_domhsh_search - Search for a domain entry |
131 | * @domain: the domain | 139 | * @domain: the domain |
140 | * @family: the address family | ||
132 | * | 141 | * |
133 | * Description: | 142 | * Description: |
134 | * Searches the domain hash table and returns a pointer to the hash table | 143 | * Searches the domain hash table and returns a pointer to the hash table |
135 | * entry if found, otherwise NULL is returned. The caller is responsible for | 144 | * entry if found, otherwise NULL is returned. @family may be %AF_UNSPEC |
145 | * which matches any address family entries. The caller is responsible for | ||
136 | * ensuring that the hash table is protected with either a RCU read lock or the | 146 | * ensuring that the hash table is protected with either a RCU read lock or the |
137 | * hash table lock. | 147 | * hash table lock. |
138 | * | 148 | * |
139 | */ | 149 | */ |
140 | static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | 150 | static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain, |
151 | u16 family) | ||
141 | { | 152 | { |
142 | u32 bkt; | 153 | u32 bkt; |
143 | struct list_head *bkt_list; | 154 | struct list_head *bkt_list; |
@@ -147,7 +158,9 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
147 | bkt = netlbl_domhsh_hash(domain); | 158 | bkt = netlbl_domhsh_hash(domain); |
148 | bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; | 159 | bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; |
149 | list_for_each_entry_rcu(iter, bkt_list, list) | 160 | list_for_each_entry_rcu(iter, bkt_list, list) |
150 | if (iter->valid && strcmp(iter->domain, domain) == 0) | 161 | if (iter->valid && |
162 | netlbl_family_match(iter->family, family) && | ||
163 | strcmp(iter->domain, domain) == 0) | ||
151 | return iter; | 164 | return iter; |
152 | } | 165 | } |
153 | 166 | ||
@@ -157,28 +170,37 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
157 | /** | 170 | /** |
158 | * netlbl_domhsh_search_def - Search for a domain entry | 171 | * netlbl_domhsh_search_def - Search for a domain entry |
159 | * @domain: the domain | 172 | * @domain: the domain |
160 | * @def: return default if no match is found | 173 | * @family: the address family |
161 | * | 174 | * |
162 | * Description: | 175 | * Description: |
163 | * Searches the domain hash table and returns a pointer to the hash table | 176 | * Searches the domain hash table and returns a pointer to the hash table |
164 | * entry if an exact match is found, if an exact match is not present in the | 177 | * entry if an exact match is found, if an exact match is not present in the |
165 | * hash table then the default entry is returned if valid otherwise NULL is | 178 | * hash table then the default entry is returned if valid otherwise NULL is |
166 | * returned. The caller is responsible ensuring that the hash table is | 179 | * returned. @family may be %AF_UNSPEC which matches any address family |
180 | * entries. The caller is responsible ensuring that the hash table is | ||
167 | * protected with either a RCU read lock or the hash table lock. | 181 | * protected with either a RCU read lock or the hash table lock. |
168 | * | 182 | * |
169 | */ | 183 | */ |
170 | static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) | 184 | static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain, |
185 | u16 family) | ||
171 | { | 186 | { |
172 | struct netlbl_dom_map *entry; | 187 | struct netlbl_dom_map *entry; |
173 | 188 | ||
174 | entry = netlbl_domhsh_search(domain); | 189 | entry = netlbl_domhsh_search(domain, family); |
175 | if (entry == NULL) { | 190 | if (entry != NULL) |
176 | entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); | 191 | return entry; |
177 | if (entry != NULL && !entry->valid) | 192 | if (family == AF_INET || family == AF_UNSPEC) { |
178 | entry = NULL; | 193 | entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv4); |
194 | if (entry != NULL && entry->valid) | ||
195 | return entry; | ||
196 | } | ||
197 | if (family == AF_INET6 || family == AF_UNSPEC) { | ||
198 | entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv6); | ||
199 | if (entry != NULL && entry->valid) | ||
200 | return entry; | ||
179 | } | 201 | } |
180 | 202 | ||
181 | return entry; | 203 | return NULL; |
182 | } | 204 | } |
183 | 205 | ||
184 | /** | 206 | /** |
@@ -203,6 +225,7 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, | |||
203 | { | 225 | { |
204 | struct audit_buffer *audit_buf; | 226 | struct audit_buffer *audit_buf; |
205 | struct cipso_v4_doi *cipsov4 = NULL; | 227 | struct cipso_v4_doi *cipsov4 = NULL; |
228 | struct calipso_doi *calipso = NULL; | ||
206 | u32 type; | 229 | u32 type; |
207 | 230 | ||
208 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); | 231 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); |
@@ -221,12 +244,14 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, | |||
221 | struct netlbl_domaddr6_map *map6; | 244 | struct netlbl_domaddr6_map *map6; |
222 | map6 = netlbl_domhsh_addr6_entry(addr6); | 245 | map6 = netlbl_domhsh_addr6_entry(addr6); |
223 | type = map6->def.type; | 246 | type = map6->def.type; |
247 | calipso = map6->def.calipso; | ||
224 | netlbl_af6list_audit_addr(audit_buf, 0, NULL, | 248 | netlbl_af6list_audit_addr(audit_buf, 0, NULL, |
225 | &addr6->addr, &addr6->mask); | 249 | &addr6->addr, &addr6->mask); |
226 | #endif /* IPv6 */ | 250 | #endif /* IPv6 */ |
227 | } else { | 251 | } else { |
228 | type = entry->def.type; | 252 | type = entry->def.type; |
229 | cipsov4 = entry->def.cipso; | 253 | cipsov4 = entry->def.cipso; |
254 | calipso = entry->def.calipso; | ||
230 | } | 255 | } |
231 | switch (type) { | 256 | switch (type) { |
232 | case NETLBL_NLTYPE_UNLABELED: | 257 | case NETLBL_NLTYPE_UNLABELED: |
@@ -238,6 +263,12 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, | |||
238 | " nlbl_protocol=cipsov4 cipso_doi=%u", | 263 | " nlbl_protocol=cipsov4 cipso_doi=%u", |
239 | cipsov4->doi); | 264 | cipsov4->doi); |
240 | break; | 265 | break; |
266 | case NETLBL_NLTYPE_CALIPSO: | ||
267 | BUG_ON(calipso == NULL); | ||
268 | audit_log_format(audit_buf, | ||
269 | " nlbl_protocol=calipso calipso_doi=%u", | ||
270 | calipso->doi); | ||
271 | break; | ||
241 | } | 272 | } |
242 | audit_log_format(audit_buf, " res=%u", result == 0 ? 1 : 0); | 273 | audit_log_format(audit_buf, " res=%u", result == 0 ? 1 : 0); |
243 | audit_log_end(audit_buf); | 274 | audit_log_end(audit_buf); |
@@ -264,13 +295,25 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) | |||
264 | if (entry == NULL) | 295 | if (entry == NULL) |
265 | return -EINVAL; | 296 | return -EINVAL; |
266 | 297 | ||
298 | if (entry->family != AF_INET && entry->family != AF_INET6 && | ||
299 | (entry->family != AF_UNSPEC || | ||
300 | entry->def.type != NETLBL_NLTYPE_UNLABELED)) | ||
301 | return -EINVAL; | ||
302 | |||
267 | switch (entry->def.type) { | 303 | switch (entry->def.type) { |
268 | case NETLBL_NLTYPE_UNLABELED: | 304 | case NETLBL_NLTYPE_UNLABELED: |
269 | if (entry->def.cipso != NULL || entry->def.addrsel != NULL) | 305 | if (entry->def.cipso != NULL || entry->def.calipso != NULL || |
306 | entry->def.addrsel != NULL) | ||
270 | return -EINVAL; | 307 | return -EINVAL; |
271 | break; | 308 | break; |
272 | case NETLBL_NLTYPE_CIPSOV4: | 309 | case NETLBL_NLTYPE_CIPSOV4: |
273 | if (entry->def.cipso == NULL) | 310 | if (entry->family != AF_INET || |
311 | entry->def.cipso == NULL) | ||
312 | return -EINVAL; | ||
313 | break; | ||
314 | case NETLBL_NLTYPE_CALIPSO: | ||
315 | if (entry->family != AF_INET6 || | ||
316 | entry->def.calipso == NULL) | ||
274 | return -EINVAL; | 317 | return -EINVAL; |
275 | break; | 318 | break; |
276 | case NETLBL_NLTYPE_ADDRSELECT: | 319 | case NETLBL_NLTYPE_ADDRSELECT: |
@@ -294,6 +337,12 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) | |||
294 | map6 = netlbl_domhsh_addr6_entry(iter6); | 337 | map6 = netlbl_domhsh_addr6_entry(iter6); |
295 | switch (map6->def.type) { | 338 | switch (map6->def.type) { |
296 | case NETLBL_NLTYPE_UNLABELED: | 339 | case NETLBL_NLTYPE_UNLABELED: |
340 | if (map6->def.calipso != NULL) | ||
341 | return -EINVAL; | ||
342 | break; | ||
343 | case NETLBL_NLTYPE_CALIPSO: | ||
344 | if (map6->def.calipso == NULL) | ||
345 | return -EINVAL; | ||
297 | break; | 346 | break; |
298 | default: | 347 | default: |
299 | return -EINVAL; | 348 | return -EINVAL; |
@@ -358,15 +407,18 @@ int __init netlbl_domhsh_init(u32 size) | |||
358 | * | 407 | * |
359 | * Description: | 408 | * Description: |
360 | * Adds a new entry to the domain hash table and handles any updates to the | 409 | * Adds a new entry to the domain hash table and handles any updates to the |
361 | * lower level protocol handler (i.e. CIPSO). Returns zero on success, | 410 | * lower level protocol handler (i.e. CIPSO). @entry->family may be set to |
362 | * negative on failure. | 411 | * %AF_UNSPEC which will add an entry that matches all address families. This |
412 | * is only useful for the unlabelled type and will only succeed if there is no | ||
413 | * existing entry for any address family with the same domain. Returns zero | ||
414 | * on success, negative on failure. | ||
363 | * | 415 | * |
364 | */ | 416 | */ |
365 | int netlbl_domhsh_add(struct netlbl_dom_map *entry, | 417 | int netlbl_domhsh_add(struct netlbl_dom_map *entry, |
366 | struct netlbl_audit *audit_info) | 418 | struct netlbl_audit *audit_info) |
367 | { | 419 | { |
368 | int ret_val = 0; | 420 | int ret_val = 0; |
369 | struct netlbl_dom_map *entry_old; | 421 | struct netlbl_dom_map *entry_old, *entry_b; |
370 | struct netlbl_af4list *iter4; | 422 | struct netlbl_af4list *iter4; |
371 | struct netlbl_af4list *tmp4; | 423 | struct netlbl_af4list *tmp4; |
372 | #if IS_ENABLED(CONFIG_IPV6) | 424 | #if IS_ENABLED(CONFIG_IPV6) |
@@ -385,9 +437,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
385 | rcu_read_lock(); | 437 | rcu_read_lock(); |
386 | spin_lock(&netlbl_domhsh_lock); | 438 | spin_lock(&netlbl_domhsh_lock); |
387 | if (entry->domain != NULL) | 439 | if (entry->domain != NULL) |
388 | entry_old = netlbl_domhsh_search(entry->domain); | 440 | entry_old = netlbl_domhsh_search(entry->domain, entry->family); |
389 | else | 441 | else |
390 | entry_old = netlbl_domhsh_search_def(entry->domain); | 442 | entry_old = netlbl_domhsh_search_def(entry->domain, |
443 | entry->family); | ||
391 | if (entry_old == NULL) { | 444 | if (entry_old == NULL) { |
392 | entry->valid = 1; | 445 | entry->valid = 1; |
393 | 446 | ||
@@ -397,7 +450,41 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
397 | &rcu_dereference(netlbl_domhsh)->tbl[bkt]); | 450 | &rcu_dereference(netlbl_domhsh)->tbl[bkt]); |
398 | } else { | 451 | } else { |
399 | INIT_LIST_HEAD(&entry->list); | 452 | INIT_LIST_HEAD(&entry->list); |
400 | rcu_assign_pointer(netlbl_domhsh_def, entry); | 453 | switch (entry->family) { |
454 | case AF_INET: | ||
455 | rcu_assign_pointer(netlbl_domhsh_def_ipv4, | ||
456 | entry); | ||
457 | break; | ||
458 | case AF_INET6: | ||
459 | rcu_assign_pointer(netlbl_domhsh_def_ipv6, | ||
460 | entry); | ||
461 | break; | ||
462 | case AF_UNSPEC: | ||
463 | if (entry->def.type != | ||
464 | NETLBL_NLTYPE_UNLABELED) { | ||
465 | ret_val = -EINVAL; | ||
466 | goto add_return; | ||
467 | } | ||
468 | entry_b = kzalloc(sizeof(*entry_b), GFP_ATOMIC); | ||
469 | if (entry_b == NULL) { | ||
470 | ret_val = -ENOMEM; | ||
471 | goto add_return; | ||
472 | } | ||
473 | entry_b->family = AF_INET6; | ||
474 | entry_b->def.type = NETLBL_NLTYPE_UNLABELED; | ||
475 | entry_b->valid = 1; | ||
476 | entry->family = AF_INET; | ||
477 | rcu_assign_pointer(netlbl_domhsh_def_ipv4, | ||
478 | entry); | ||
479 | rcu_assign_pointer(netlbl_domhsh_def_ipv6, | ||
480 | entry_b); | ||
481 | break; | ||
482 | default: | ||
483 | /* Already checked in | ||
484 | * netlbl_domhsh_validate(). */ | ||
485 | ret_val = -EINVAL; | ||
486 | goto add_return; | ||
487 | } | ||
401 | } | 488 | } |
402 | 489 | ||
403 | if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) { | 490 | if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) { |
@@ -513,10 +600,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, | |||
513 | spin_lock(&netlbl_domhsh_lock); | 600 | spin_lock(&netlbl_domhsh_lock); |
514 | if (entry->valid) { | 601 | if (entry->valid) { |
515 | entry->valid = 0; | 602 | entry->valid = 0; |
516 | if (entry != rcu_dereference(netlbl_domhsh_def)) | 603 | if (entry == rcu_dereference(netlbl_domhsh_def_ipv4)) |
517 | list_del_rcu(&entry->list); | 604 | RCU_INIT_POINTER(netlbl_domhsh_def_ipv4, NULL); |
605 | else if (entry == rcu_dereference(netlbl_domhsh_def_ipv6)) | ||
606 | RCU_INIT_POINTER(netlbl_domhsh_def_ipv6, NULL); | ||
518 | else | 607 | else |
519 | RCU_INIT_POINTER(netlbl_domhsh_def, NULL); | 608 | list_del_rcu(&entry->list); |
520 | } else | 609 | } else |
521 | ret_val = -ENOENT; | 610 | ret_val = -ENOENT; |
522 | spin_unlock(&netlbl_domhsh_lock); | 611 | spin_unlock(&netlbl_domhsh_lock); |
@@ -533,6 +622,10 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, | |||
533 | if (ret_val == 0) { | 622 | if (ret_val == 0) { |
534 | struct netlbl_af4list *iter4; | 623 | struct netlbl_af4list *iter4; |
535 | struct netlbl_domaddr4_map *map4; | 624 | struct netlbl_domaddr4_map *map4; |
625 | #if IS_ENABLED(CONFIG_IPV6) | ||
626 | struct netlbl_af6list *iter6; | ||
627 | struct netlbl_domaddr6_map *map6; | ||
628 | #endif /* IPv6 */ | ||
536 | 629 | ||
537 | switch (entry->def.type) { | 630 | switch (entry->def.type) { |
538 | case NETLBL_NLTYPE_ADDRSELECT: | 631 | case NETLBL_NLTYPE_ADDRSELECT: |
@@ -541,12 +634,22 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, | |||
541 | map4 = netlbl_domhsh_addr4_entry(iter4); | 634 | map4 = netlbl_domhsh_addr4_entry(iter4); |
542 | cipso_v4_doi_putdef(map4->def.cipso); | 635 | cipso_v4_doi_putdef(map4->def.cipso); |
543 | } | 636 | } |
544 | /* no need to check the IPv6 list since we currently | 637 | #if IS_ENABLED(CONFIG_IPV6) |
545 | * support only unlabeled protocols for IPv6 */ | 638 | netlbl_af6list_foreach_rcu(iter6, |
639 | &entry->def.addrsel->list6) { | ||
640 | map6 = netlbl_domhsh_addr6_entry(iter6); | ||
641 | calipso_doi_putdef(map6->def.calipso); | ||
642 | } | ||
643 | #endif /* IPv6 */ | ||
546 | break; | 644 | break; |
547 | case NETLBL_NLTYPE_CIPSOV4: | 645 | case NETLBL_NLTYPE_CIPSOV4: |
548 | cipso_v4_doi_putdef(entry->def.cipso); | 646 | cipso_v4_doi_putdef(entry->def.cipso); |
549 | break; | 647 | break; |
648 | #if IS_ENABLED(CONFIG_IPV6) | ||
649 | case NETLBL_NLTYPE_CALIPSO: | ||
650 | calipso_doi_putdef(entry->def.calipso); | ||
651 | break; | ||
652 | #endif /* IPv6 */ | ||
550 | } | 653 | } |
551 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); | 654 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); |
552 | } | 655 | } |
@@ -583,9 +686,9 @@ int netlbl_domhsh_remove_af4(const char *domain, | |||
583 | rcu_read_lock(); | 686 | rcu_read_lock(); |
584 | 687 | ||
585 | if (domain) | 688 | if (domain) |
586 | entry_map = netlbl_domhsh_search(domain); | 689 | entry_map = netlbl_domhsh_search(domain, AF_INET); |
587 | else | 690 | else |
588 | entry_map = netlbl_domhsh_search_def(domain); | 691 | entry_map = netlbl_domhsh_search_def(domain, AF_INET); |
589 | if (entry_map == NULL || | 692 | if (entry_map == NULL || |
590 | entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT) | 693 | entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT) |
591 | goto remove_af4_failure; | 694 | goto remove_af4_failure; |
@@ -622,28 +725,114 @@ remove_af4_failure: | |||
622 | return -ENOENT; | 725 | return -ENOENT; |
623 | } | 726 | } |
624 | 727 | ||
728 | #if IS_ENABLED(CONFIG_IPV6) | ||
729 | /** | ||
730 | * netlbl_domhsh_remove_af6 - Removes an address selector entry | ||
731 | * @domain: the domain | ||
732 | * @addr: IPv6 address | ||
733 | * @mask: IPv6 address mask | ||
734 | * @audit_info: NetLabel audit information | ||
735 | * | ||
736 | * Description: | ||
737 | * Removes an individual address selector from a domain mapping and potentially | ||
738 | * the entire mapping if it is empty. Returns zero on success, negative values | ||
739 | * on failure. | ||
740 | * | ||
741 | */ | ||
742 | int netlbl_domhsh_remove_af6(const char *domain, | ||
743 | const struct in6_addr *addr, | ||
744 | const struct in6_addr *mask, | ||
745 | struct netlbl_audit *audit_info) | ||
746 | { | ||
747 | struct netlbl_dom_map *entry_map; | ||
748 | struct netlbl_af6list *entry_addr; | ||
749 | struct netlbl_af4list *iter4; | ||
750 | struct netlbl_af6list *iter6; | ||
751 | struct netlbl_domaddr6_map *entry; | ||
752 | |||
753 | rcu_read_lock(); | ||
754 | |||
755 | if (domain) | ||
756 | entry_map = netlbl_domhsh_search(domain, AF_INET6); | ||
757 | else | ||
758 | entry_map = netlbl_domhsh_search_def(domain, AF_INET6); | ||
759 | if (entry_map == NULL || | ||
760 | entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT) | ||
761 | goto remove_af6_failure; | ||
762 | |||
763 | spin_lock(&netlbl_domhsh_lock); | ||
764 | entry_addr = netlbl_af6list_remove(addr, mask, | ||
765 | &entry_map->def.addrsel->list6); | ||
766 | spin_unlock(&netlbl_domhsh_lock); | ||
767 | |||
768 | if (entry_addr == NULL) | ||
769 | goto remove_af6_failure; | ||
770 | netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4) | ||
771 | goto remove_af6_single_addr; | ||
772 | netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6) | ||
773 | goto remove_af6_single_addr; | ||
774 | /* the domain mapping is empty so remove it from the mapping table */ | ||
775 | netlbl_domhsh_remove_entry(entry_map, audit_info); | ||
776 | |||
777 | remove_af6_single_addr: | ||
778 | rcu_read_unlock(); | ||
779 | /* yick, we can't use call_rcu here because we don't have a rcu head | ||
780 | * pointer but hopefully this should be a rare case so the pause | ||
781 | * shouldn't be a problem */ | ||
782 | synchronize_rcu(); | ||
783 | entry = netlbl_domhsh_addr6_entry(entry_addr); | ||
784 | calipso_doi_putdef(entry->def.calipso); | ||
785 | kfree(entry); | ||
786 | return 0; | ||
787 | |||
788 | remove_af6_failure: | ||
789 | rcu_read_unlock(); | ||
790 | return -ENOENT; | ||
791 | } | ||
792 | #endif /* IPv6 */ | ||
793 | |||
625 | /** | 794 | /** |
626 | * netlbl_domhsh_remove - Removes an entry from the domain hash table | 795 | * netlbl_domhsh_remove - Removes an entry from the domain hash table |
627 | * @domain: the domain to remove | 796 | * @domain: the domain to remove |
797 | * @family: address family | ||
628 | * @audit_info: NetLabel audit information | 798 | * @audit_info: NetLabel audit information |
629 | * | 799 | * |
630 | * Description: | 800 | * Description: |
631 | * Removes an entry from the domain hash table and handles any updates to the | 801 | * Removes an entry from the domain hash table and handles any updates to the |
632 | * lower level protocol handler (i.e. CIPSO). Returns zero on success, | 802 | * lower level protocol handler (i.e. CIPSO). @family may be %AF_UNSPEC which |
633 | * negative on failure. | 803 | * removes all address family entries. Returns zero on success, negative on |
804 | * failure. | ||
634 | * | 805 | * |
635 | */ | 806 | */ |
636 | int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | 807 | int netlbl_domhsh_remove(const char *domain, u16 family, |
808 | struct netlbl_audit *audit_info) | ||
637 | { | 809 | { |
638 | int ret_val; | 810 | int ret_val = -EINVAL; |
639 | struct netlbl_dom_map *entry; | 811 | struct netlbl_dom_map *entry; |
640 | 812 | ||
641 | rcu_read_lock(); | 813 | rcu_read_lock(); |
642 | if (domain) | 814 | |
643 | entry = netlbl_domhsh_search(domain); | 815 | if (family == AF_INET || family == AF_UNSPEC) { |
644 | else | 816 | if (domain) |
645 | entry = netlbl_domhsh_search_def(domain); | 817 | entry = netlbl_domhsh_search(domain, AF_INET); |
646 | ret_val = netlbl_domhsh_remove_entry(entry, audit_info); | 818 | else |
819 | entry = netlbl_domhsh_search_def(domain, AF_INET); | ||
820 | ret_val = netlbl_domhsh_remove_entry(entry, audit_info); | ||
821 | if (ret_val && ret_val != -ENOENT) | ||
822 | goto done; | ||
823 | } | ||
824 | if (family == AF_INET6 || family == AF_UNSPEC) { | ||
825 | int ret_val2; | ||
826 | |||
827 | if (domain) | ||
828 | entry = netlbl_domhsh_search(domain, AF_INET6); | ||
829 | else | ||
830 | entry = netlbl_domhsh_search_def(domain, AF_INET6); | ||
831 | ret_val2 = netlbl_domhsh_remove_entry(entry, audit_info); | ||
832 | if (ret_val2 != -ENOENT) | ||
833 | ret_val = ret_val2; | ||
834 | } | ||
835 | done: | ||
647 | rcu_read_unlock(); | 836 | rcu_read_unlock(); |
648 | 837 | ||
649 | return ret_val; | 838 | return ret_val; |
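netlbl_domhsh_remove() above becomes family-aware: for AF_UNSPEC it tries the IPv4 entry first and lets the IPv6 result override the return value only when that result is something other than -ENOENT, so a missing IPv4 mapping does not hide a successful (or failed) IPv6 removal. A minimal userspace sketch of that error-folding pattern; remove_v4()/remove_v6() are hypothetical stand-ins, not kernel APIs:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Hypothetical per-family helpers standing in for the
     * netlbl_domhsh_search()/netlbl_domhsh_remove_entry() calls above. */
    static int remove_v4(const char *domain) { (void)domain; return -ENOENT; }
    static int remove_v6(const char *domain) { (void)domain; return 0; }

    /* Remove a mapping for one family, or both when family == AF_UNSPEC.
     * An -ENOENT from the IPv4 pass must not mask a more meaningful IPv6
     * result, so the second return value only overrides when it differs. */
    static int remove_mapping(const char *domain, int family)
    {
        int ret = -EINVAL;

        if (family == AF_INET || family == AF_UNSPEC) {
            ret = remove_v4(domain);
            if (ret && ret != -ENOENT)
                return ret;
        }
        if (family == AF_INET6 || family == AF_UNSPEC) {
            int ret6 = remove_v6(domain);

            if (ret6 != -ENOENT)
                ret = ret6;
        }
        return ret;
    }

    int main(void)
    {
        printf("AF_UNSPEC removal -> %d\n",
               remove_mapping("example.com", AF_UNSPEC));
        return 0;
    }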
@@ -651,32 +840,38 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
651 | 840 | ||
652 | /** | 841 | /** |
653 | * netlbl_domhsh_remove_default - Removes the default entry from the table | 842 | * netlbl_domhsh_remove_default - Removes the default entry from the table |
843 | * @family: address family | ||
654 | * @audit_info: NetLabel audit information | 844 | * @audit_info: NetLabel audit information |
655 | * | 845 | * |
656 | * Description: | 846 | * Description: |
657 | * Removes/resets the default entry for the domain hash table and handles any | 847 | * Removes/resets the default entry corresponding to @family from the domain |
658 | * updates to the lower level protocol handler (i.e. CIPSO). Returns zero on | 848 | * hash table and handles any updates to the lower level protocol handler |
659 | * success, non-zero on failure. | 849 | * (i.e. CIPSO). @family may be %AF_UNSPEC which removes all address family |
850 | * entries. Returns zero on success, negative on failure. | ||
660 | * | 851 | * |
661 | */ | 852 | */ |
662 | int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info) | 853 | int netlbl_domhsh_remove_default(u16 family, struct netlbl_audit *audit_info) |
663 | { | 854 | { |
664 | return netlbl_domhsh_remove(NULL, audit_info); | 855 | return netlbl_domhsh_remove(NULL, family, audit_info); |
665 | } | 856 | } |
666 | 857 | ||
667 | /** | 858 | /** |
668 | * netlbl_domhsh_getentry - Get an entry from the domain hash table | 859 | * netlbl_domhsh_getentry - Get an entry from the domain hash table |
669 | * @domain: the domain name to search for | 860 | * @domain: the domain name to search for |
861 | * @family: address family | ||
670 | * | 862 | * |
671 | * Description: | 863 | * Description: |
672 | * Look through the domain hash table searching for an entry to match @domain, | 864 | * Look through the domain hash table searching for an entry to match @domain, |
673 | * return a pointer to a copy of the entry or NULL. The caller is responsible | 865 | * with address family @family, return a pointer to a copy of the entry or |
674 | * for ensuring that rcu_read_[un]lock() is called. | 866 | * NULL. The caller is responsible for ensuring that rcu_read_[un]lock() is |
867 | * called. | ||
675 | * | 868 | * |
676 | */ | 869 | */ |
677 | struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain) | 870 | struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain, u16 family) |
678 | { | 871 | { |
679 | return netlbl_domhsh_search_def(domain); | 872 | if (family == AF_UNSPEC) |
873 | return NULL; | ||
874 | return netlbl_domhsh_search_def(domain, family); | ||
680 | } | 875 | } |
681 | 876 | ||
682 | /** | 877 | /** |
@@ -696,7 +891,7 @@ struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain, | |||
696 | struct netlbl_dom_map *dom_iter; | 891 | struct netlbl_dom_map *dom_iter; |
697 | struct netlbl_af4list *addr_iter; | 892 | struct netlbl_af4list *addr_iter; |
698 | 893 | ||
699 | dom_iter = netlbl_domhsh_search_def(domain); | 894 | dom_iter = netlbl_domhsh_search_def(domain, AF_INET); |
700 | if (dom_iter == NULL) | 895 | if (dom_iter == NULL) |
701 | return NULL; | 896 | return NULL; |
702 | 897 | ||
@@ -726,7 +921,7 @@ struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain, | |||
726 | struct netlbl_dom_map *dom_iter; | 921 | struct netlbl_dom_map *dom_iter; |
727 | struct netlbl_af6list *addr_iter; | 922 | struct netlbl_af6list *addr_iter; |
728 | 923 | ||
729 | dom_iter = netlbl_domhsh_search_def(domain); | 924 | dom_iter = netlbl_domhsh_search_def(domain, AF_INET6); |
730 | if (dom_iter == NULL) | 925 | if (dom_iter == NULL) |
731 | return NULL; | 926 | return NULL; |
732 | 927 | ||
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h index 680caf4dff56..1f9247781927 100644 --- a/net/netlabel/netlabel_domainhash.h +++ b/net/netlabel/netlabel_domainhash.h | |||
@@ -51,6 +51,7 @@ struct netlbl_dommap_def { | |||
51 | union { | 51 | union { |
52 | struct netlbl_domaddr_map *addrsel; | 52 | struct netlbl_domaddr_map *addrsel; |
53 | struct cipso_v4_doi *cipso; | 53 | struct cipso_v4_doi *cipso; |
54 | struct calipso_doi *calipso; | ||
54 | }; | 55 | }; |
55 | }; | 56 | }; |
56 | #define netlbl_domhsh_addr4_entry(iter) \ | 57 | #define netlbl_domhsh_addr4_entry(iter) \ |
@@ -70,6 +71,7 @@ struct netlbl_domaddr6_map { | |||
70 | 71 | ||
71 | struct netlbl_dom_map { | 72 | struct netlbl_dom_map { |
72 | char *domain; | 73 | char *domain; |
74 | u16 family; | ||
73 | struct netlbl_dommap_def def; | 75 | struct netlbl_dommap_def def; |
74 | 76 | ||
75 | u32 valid; | 77 | u32 valid; |
@@ -91,14 +93,23 @@ int netlbl_domhsh_remove_af4(const char *domain, | |||
91 | const struct in_addr *addr, | 93 | const struct in_addr *addr, |
92 | const struct in_addr *mask, | 94 | const struct in_addr *mask, |
93 | struct netlbl_audit *audit_info); | 95 | struct netlbl_audit *audit_info); |
94 | int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info); | 96 | int netlbl_domhsh_remove_af6(const char *domain, |
95 | int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info); | 97 | const struct in6_addr *addr, |
96 | struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain); | 98 | const struct in6_addr *mask, |
99 | struct netlbl_audit *audit_info); | ||
100 | int netlbl_domhsh_remove(const char *domain, u16 family, | ||
101 | struct netlbl_audit *audit_info); | ||
102 | int netlbl_domhsh_remove_default(u16 family, struct netlbl_audit *audit_info); | ||
103 | struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain, u16 family); | ||
97 | struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain, | 104 | struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain, |
98 | __be32 addr); | 105 | __be32 addr); |
99 | #if IS_ENABLED(CONFIG_IPV6) | 106 | #if IS_ENABLED(CONFIG_IPV6) |
100 | struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain, | 107 | struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain, |
101 | const struct in6_addr *addr); | 108 | const struct in6_addr *addr); |
109 | int netlbl_domhsh_remove_af6(const char *domain, | ||
110 | const struct in6_addr *addr, | ||
111 | const struct in6_addr *mask, | ||
112 | struct netlbl_audit *audit_info); | ||
102 | #endif /* IPv6 */ | 113 | #endif /* IPv6 */ |
103 | 114 | ||
104 | int netlbl_domhsh_walk(u32 *skip_bkt, | 115 | int netlbl_domhsh_walk(u32 *skip_bkt, |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 1325776daa27..28c56b95fb7f 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -37,12 +37,14 @@ | |||
37 | #include <net/ipv6.h> | 37 | #include <net/ipv6.h> |
38 | #include <net/netlabel.h> | 38 | #include <net/netlabel.h> |
39 | #include <net/cipso_ipv4.h> | 39 | #include <net/cipso_ipv4.h> |
40 | #include <net/calipso.h> | ||
40 | #include <asm/bug.h> | 41 | #include <asm/bug.h> |
41 | #include <linux/atomic.h> | 42 | #include <linux/atomic.h> |
42 | 43 | ||
43 | #include "netlabel_domainhash.h" | 44 | #include "netlabel_domainhash.h" |
44 | #include "netlabel_unlabeled.h" | 45 | #include "netlabel_unlabeled.h" |
45 | #include "netlabel_cipso_v4.h" | 46 | #include "netlabel_cipso_v4.h" |
47 | #include "netlabel_calipso.h" | ||
46 | #include "netlabel_user.h" | 48 | #include "netlabel_user.h" |
47 | #include "netlabel_mgmt.h" | 49 | #include "netlabel_mgmt.h" |
48 | #include "netlabel_addrlist.h" | 50 | #include "netlabel_addrlist.h" |
@@ -72,12 +74,17 @@ int netlbl_cfg_map_del(const char *domain, | |||
72 | struct netlbl_audit *audit_info) | 74 | struct netlbl_audit *audit_info) |
73 | { | 75 | { |
74 | if (addr == NULL && mask == NULL) { | 76 | if (addr == NULL && mask == NULL) { |
75 | return netlbl_domhsh_remove(domain, audit_info); | 77 | return netlbl_domhsh_remove(domain, family, audit_info); |
76 | } else if (addr != NULL && mask != NULL) { | 78 | } else if (addr != NULL && mask != NULL) { |
77 | switch (family) { | 79 | switch (family) { |
78 | case AF_INET: | 80 | case AF_INET: |
79 | return netlbl_domhsh_remove_af4(domain, addr, mask, | 81 | return netlbl_domhsh_remove_af4(domain, addr, mask, |
80 | audit_info); | 82 | audit_info); |
83 | #if IS_ENABLED(CONFIG_IPV6) | ||
84 | case AF_INET6: | ||
85 | return netlbl_domhsh_remove_af6(domain, addr, mask, | ||
86 | audit_info); | ||
87 | #endif /* IPv6 */ | ||
81 | default: | 88 | default: |
82 | return -EPFNOSUPPORT; | 89 | return -EPFNOSUPPORT; |
83 | } | 90 | } |
@@ -119,6 +126,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
119 | if (entry->domain == NULL) | 126 | if (entry->domain == NULL) |
120 | goto cfg_unlbl_map_add_failure; | 127 | goto cfg_unlbl_map_add_failure; |
121 | } | 128 | } |
129 | entry->family = family; | ||
122 | 130 | ||
123 | if (addr == NULL && mask == NULL) | 131 | if (addr == NULL && mask == NULL) |
124 | entry->def.type = NETLBL_NLTYPE_UNLABELED; | 132 | entry->def.type = NETLBL_NLTYPE_UNLABELED; |
@@ -345,6 +353,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
345 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 353 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
346 | if (entry == NULL) | 354 | if (entry == NULL) |
347 | goto out_entry; | 355 | goto out_entry; |
356 | entry->family = AF_INET; | ||
348 | if (domain != NULL) { | 357 | if (domain != NULL) { |
349 | entry->domain = kstrdup(domain, GFP_ATOMIC); | 358 | entry->domain = kstrdup(domain, GFP_ATOMIC); |
350 | if (entry->domain == NULL) | 359 | if (entry->domain == NULL) |
@@ -399,6 +408,139 @@ out_entry: | |||
399 | return ret_val; | 408 | return ret_val; |
400 | } | 409 | } |
401 | 410 | ||
411 | /** | ||
412 | * netlbl_cfg_calipso_add - Add a new CALIPSO DOI definition | ||
413 | * @doi_def: CALIPSO DOI definition | ||
414 | * @audit_info: NetLabel audit information | ||
415 | * | ||
416 | * Description: | ||
417 | * Add a new CALIPSO DOI definition as defined by @doi_def. Returns zero on | ||
418 | * success and negative values on failure. | ||
419 | * | ||
420 | */ | ||
421 | int netlbl_cfg_calipso_add(struct calipso_doi *doi_def, | ||
422 | struct netlbl_audit *audit_info) | ||
423 | { | ||
424 | #if IS_ENABLED(CONFIG_IPV6) | ||
425 | return calipso_doi_add(doi_def, audit_info); | ||
426 | #else /* IPv6 */ | ||
427 | return -ENOSYS; | ||
428 | #endif /* IPv6 */ | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * netlbl_cfg_calipso_del - Remove an existing CALIPSO DOI definition | ||
433 | * @doi: CALIPSO DOI | ||
434 | * @audit_info: NetLabel audit information | ||
435 | * | ||
436 | * Description: | ||
437 | * Remove an existing CALIPSO DOI definition matching @doi. The function | ||
438 | * returns no value. | ||
439 | * | ||
440 | */ | ||
441 | void netlbl_cfg_calipso_del(u32 doi, struct netlbl_audit *audit_info) | ||
442 | { | ||
443 | #if IS_ENABLED(CONFIG_IPV6) | ||
444 | calipso_doi_remove(doi, audit_info); | ||
445 | #endif /* IPv6 */ | ||
446 | } | ||
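netlbl_cfg_calipso_add() and netlbl_cfg_calipso_del() compile the CALIPSO calls out entirely when CONFIG_IPV6 is disabled, with the add stub reporting -ENOSYS. A hedged userspace sketch of the same compile-time gating; CONFIG_FEATURE_X and feature_x_add() are made-up names used only for illustration:

    #include <errno.h>
    #include <stdio.h>

    /* Toggle at build time, e.g. cc -DCONFIG_FEATURE_X ... */
    #ifdef CONFIG_FEATURE_X
    static int feature_x_add(int doi)
    {
        printf("feature enabled for DOI %d\n", doi);
        return 0;
    }
    #else
    /* Stub keeps callers link-clean while reporting the feature is absent. */
    static int feature_x_add(int doi)
    {
        (void)doi;
        return -ENOSYS;
    }
    #endif

    int main(void)
    {
        int ret = feature_x_add(16);

        if (ret == -ENOSYS)
            printf("feature compiled out\n");
        return 0;
    }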
447 | |||
448 | /** | ||
449 | * netlbl_cfg_calipso_map_add - Add a new CALIPSO DOI mapping | ||
450 | * @doi: the CALIPSO DOI | ||
451 | * @domain: the domain mapping to add | ||
452 | * @addr: IP address | ||
453 | * @mask: IP address mask | ||
454 | * @audit_info: NetLabel audit information | ||
455 | * | ||
456 | * Description: | ||
457 | * Add a new NetLabel/LSM domain mapping for the given CALIPSO DOI to the | ||
458 | * NetLabel subsystem. A @domain value of NULL adds a new default domain | ||
459 | * mapping. Returns zero on success, negative values on failure. | ||
460 | * | ||
461 | */ | ||
462 | int netlbl_cfg_calipso_map_add(u32 doi, | ||
463 | const char *domain, | ||
464 | const struct in6_addr *addr, | ||
465 | const struct in6_addr *mask, | ||
466 | struct netlbl_audit *audit_info) | ||
467 | { | ||
468 | #if IS_ENABLED(CONFIG_IPV6) | ||
469 | int ret_val = -ENOMEM; | ||
470 | struct calipso_doi *doi_def; | ||
471 | struct netlbl_dom_map *entry; | ||
472 | struct netlbl_domaddr_map *addrmap = NULL; | ||
473 | struct netlbl_domaddr6_map *addrinfo = NULL; | ||
474 | |||
475 | doi_def = calipso_doi_getdef(doi); | ||
476 | if (doi_def == NULL) | ||
477 | return -ENOENT; | ||
478 | |||
479 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | ||
480 | if (entry == NULL) | ||
481 | goto out_entry; | ||
482 | entry->family = AF_INET6; | ||
483 | if (domain != NULL) { | ||
484 | entry->domain = kstrdup(domain, GFP_ATOMIC); | ||
485 | if (entry->domain == NULL) | ||
486 | goto out_domain; | ||
487 | } | ||
488 | |||
489 | if (addr == NULL && mask == NULL) { | ||
490 | entry->def.calipso = doi_def; | ||
491 | entry->def.type = NETLBL_NLTYPE_CALIPSO; | ||
492 | } else if (addr != NULL && mask != NULL) { | ||
493 | addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); | ||
494 | if (addrmap == NULL) | ||
495 | goto out_addrmap; | ||
496 | INIT_LIST_HEAD(&addrmap->list4); | ||
497 | INIT_LIST_HEAD(&addrmap->list6); | ||
498 | |||
499 | addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); | ||
500 | if (addrinfo == NULL) | ||
501 | goto out_addrinfo; | ||
502 | addrinfo->def.calipso = doi_def; | ||
503 | addrinfo->def.type = NETLBL_NLTYPE_CALIPSO; | ||
504 | addrinfo->list.addr = *addr; | ||
505 | addrinfo->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; | ||
506 | addrinfo->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; | ||
507 | addrinfo->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; | ||
508 | addrinfo->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; | ||
509 | addrinfo->list.mask = *mask; | ||
510 | addrinfo->list.valid = 1; | ||
511 | ret_val = netlbl_af6list_add(&addrinfo->list, &addrmap->list6); | ||
512 | if (ret_val != 0) | ||
513 | goto cfg_calipso_map_add_failure; | ||
514 | |||
515 | entry->def.addrsel = addrmap; | ||
516 | entry->def.type = NETLBL_NLTYPE_ADDRSELECT; | ||
517 | } else { | ||
518 | ret_val = -EINVAL; | ||
519 | goto out_addrmap; | ||
520 | } | ||
521 | |||
522 | ret_val = netlbl_domhsh_add(entry, audit_info); | ||
523 | if (ret_val != 0) | ||
524 | goto cfg_calipso_map_add_failure; | ||
525 | |||
526 | return 0; | ||
527 | |||
528 | cfg_calipso_map_add_failure: | ||
529 | kfree(addrinfo); | ||
530 | out_addrinfo: | ||
531 | kfree(addrmap); | ||
532 | out_addrmap: | ||
533 | kfree(entry->domain); | ||
534 | out_domain: | ||
535 | kfree(entry); | ||
536 | out_entry: | ||
537 | calipso_doi_putdef(doi_def); | ||
538 | return ret_val; | ||
539 | #else /* IPv6 */ | ||
540 | return -ENOSYS; | ||
541 | #endif /* IPv6 */ | ||
542 | } | ||
543 | |||
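netlbl_cfg_calipso_map_add() stores the selector address already masked, ANDing each 32-bit word of the IPv6 address against the mask before inserting it into the af6 list. The sketch below performs the same masking byte-by-byte for portability (userspace headers do not always expose s6_addr32); the /64 mask is just an illustrative value:

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
        struct in6_addr addr, mask, masked;
        char buf[INET6_ADDRSTRLEN];
        int i;

        if (inet_pton(AF_INET6, "2001:db8:1:2:3:4:5:6", &addr) != 1 ||
            inet_pton(AF_INET6, "ffff:ffff:ffff:ffff::", &mask) != 1)
            return 1;

        /* Same effect as the s6_addr32[] word-wise AND in the hunk above. */
        for (i = 0; i < 16; i++)
            masked.s6_addr[i] = addr.s6_addr[i] & mask.s6_addr[i];

        printf("%s\n", inet_ntop(AF_INET6, &masked, buf, sizeof(buf)));
        return 0;
    }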
402 | /* | 544 | /* |
403 | * Security Attribute Functions | 545 | * Security Attribute Functions |
404 | */ | 546 | */ |
@@ -519,6 +661,7 @@ int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset) | |||
519 | 661 | ||
520 | return -ENOENT; | 662 | return -ENOENT; |
521 | } | 663 | } |
664 | EXPORT_SYMBOL(netlbl_catmap_walk); | ||
522 | 665 | ||
523 | /** | 666 | /** |
524 | * netlbl_catmap_walkrng - Find the end of a string of set bits | 667 | * netlbl_catmap_walkrng - Find the end of a string of set bits |
@@ -609,20 +752,19 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap, | |||
609 | off = catmap->startbit; | 752 | off = catmap->startbit; |
610 | *offset = off; | 753 | *offset = off; |
611 | } | 754 | } |
612 | iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_NONE, 0); | 755 | iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_WALK, 0); |
613 | if (iter == NULL) { | 756 | if (iter == NULL) { |
614 | *offset = (u32)-1; | 757 | *offset = (u32)-1; |
615 | return 0; | 758 | return 0; |
616 | } | 759 | } |
617 | 760 | ||
618 | if (off < iter->startbit) { | 761 | if (off < iter->startbit) { |
619 | off = iter->startbit; | 762 | *offset = iter->startbit; |
620 | *offset = off; | 763 | off = 0; |
621 | } else | 764 | } else |
622 | off -= iter->startbit; | 765 | off -= iter->startbit; |
623 | |||
624 | idx = off / NETLBL_CATMAP_MAPSIZE; | 766 | idx = off / NETLBL_CATMAP_MAPSIZE; |
625 | *bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_SIZE); | 767 | *bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_MAPSIZE); |
626 | 768 | ||
627 | return 0; | 769 | return 0; |
628 | } | 770 | } |
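The netlbl_catmap_getlong() hunk above changes the shift to use the per-word bit count (NETLBL_CATMAP_MAPSIZE) rather than the per-node one, since shifting by more than the word width is undefined. A standalone sketch of the corrected window extraction, assuming 64-bit map words as in the kernel's catmap definition:

    #include <stdint.h>
    #include <stdio.h>

    #define MAPSIZE 64 /* bits per bitmap word (NETLBL_CATMAP_MAPSIZE) */

    /* Return the word containing bit @off, shifted so @off becomes bit 0. */
    static uint64_t catmap_window(const uint64_t *bitmap, uint32_t off)
    {
        uint32_t idx = off / MAPSIZE;

        /* Reducing @off modulo the whole node size instead would produce a
         * shift wider than the word for any offset past the first word -
         * the bug fixed in the hunk above. */
        return bitmap[idx] >> (off % MAPSIZE);
    }

    int main(void)
    {
        uint64_t bitmap[4] = { 0, 1ULL << 3, 0, 0 };

        /* Bit 67 (word 1, bit 3) ends up at bit 0 of the returned window. */
        printf("0x%llx\n", (unsigned long long)catmap_window(bitmap, 67));
        return 0;
    }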
@@ -655,6 +797,7 @@ int netlbl_catmap_setbit(struct netlbl_lsm_catmap **catmap, | |||
655 | 797 | ||
656 | return 0; | 798 | return 0; |
657 | } | 799 | } |
800 | EXPORT_SYMBOL(netlbl_catmap_setbit); | ||
658 | 801 | ||
659 | /** | 802 | /** |
660 | * netlbl_catmap_setrng - Set a range of bits in a LSM secattr catmap | 803 | * netlbl_catmap_setrng - Set a range of bits in a LSM secattr catmap |
@@ -727,6 +870,76 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap, | |||
727 | return 0; | 870 | return 0; |
728 | } | 871 | } |
729 | 872 | ||
873 | /* Bitmap functions | ||
874 | */ | ||
875 | |||
876 | /** | ||
877 | * netlbl_bitmap_walk - Walk a bitmap looking for a bit | ||
878 | * @bitmap: the bitmap | ||
879 | * @bitmap_len: length in bits | ||
880 | * @offset: starting offset | ||
881 | * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit | ||
882 | * | ||
883 | * Description: | ||
884 | * Starting at @offset, walk the bitmap from left to right until either the | ||
885 | * desired bit is found or we reach the end. Returns the bit offset on | ||
886 | * success and -1 if the bit is not found. | ||
887 | */ | ||
888 | int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, | ||
889 | u32 offset, u8 state) | ||
890 | { | ||
891 | u32 bit_spot; | ||
892 | u32 byte_offset; | ||
893 | unsigned char bitmask; | ||
894 | unsigned char byte; | ||
895 | |||
896 | byte_offset = offset / 8; | ||
897 | byte = bitmap[byte_offset]; | ||
898 | bit_spot = offset; | ||
899 | bitmask = 0x80 >> (offset % 8); | ||
900 | |||
901 | while (bit_spot < bitmap_len) { | ||
902 | if ((state && (byte & bitmask) == bitmask) || | ||
903 | (state == 0 && (byte & bitmask) == 0)) | ||
904 | return bit_spot; | ||
905 | |||
906 | bit_spot++; | ||
907 | bitmask >>= 1; | ||
908 | if (bitmask == 0) { | ||
909 | byte = bitmap[++byte_offset]; | ||
910 | bitmask = 0x80; | ||
911 | } | ||
912 | } | ||
913 | |||
914 | return -1; | ||
915 | } | ||
916 | EXPORT_SYMBOL(netlbl_bitmap_walk); | ||
917 | |||
918 | /** | ||
919 | * netlbl_bitmap_setbit - Sets a single bit in a bitmap | ||
920 | * @bitmap: the bitmap | ||
921 | * @bit: the bit | ||
922 | * @state: if non-zero, set the bit (1) else clear the bit (0) | ||
923 | * | ||
924 | * Description: | ||
925 | * Set or clear a single bit in the bitmap. The function returns no value, | ||
926 | * so the caller must ensure @bit lies within the bitmap. | ||
927 | */ | ||
928 | void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) | ||
929 | { | ||
930 | u32 byte_spot; | ||
931 | u8 bitmask; | ||
932 | |||
933 | /* gcc always rounds to zero when doing integer division */ | ||
934 | byte_spot = bit / 8; | ||
935 | bitmask = 0x80 >> (bit % 8); | ||
936 | if (state) | ||
937 | bitmap[byte_spot] |= bitmask; | ||
938 | else | ||
939 | bitmap[byte_spot] &= ~bitmask; | ||
940 | } | ||
941 | EXPORT_SYMBOL(netlbl_bitmap_setbit); | ||
942 | |||
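netlbl_bitmap_walk() and netlbl_bitmap_setbit() treat the bitmap as a byte array with the most significant bit of each byte first (bit 0 is the 0x80 bit of byte 0), the layout used by the CIPSO/CALIPSO category bitmaps. A self-contained copy of the same logic that can be compiled and exercised in userspace:

    #include <stdint.h>
    #include <stdio.h>

    /* Walk @bitmap (@bitmap_len in bits) from @offset looking for a set or
     * clear bit, MSB-first within each byte; returns the bit offset or -1. */
    static int bitmap_walk(const unsigned char *bitmap, uint32_t bitmap_len,
                           uint32_t offset, int state)
    {
        uint32_t byte_offset = offset / 8;
        unsigned char byte = bitmap[byte_offset];
        unsigned char bitmask = 0x80 >> (offset % 8);
        uint32_t bit_spot = offset;

        while (bit_spot < bitmap_len) {
            if ((state && (byte & bitmask)) || (!state && !(byte & bitmask)))
                return bit_spot;
            bit_spot++;
            bitmask >>= 1;
            if (bitmask == 0) {
                byte = bitmap[++byte_offset];
                bitmask = 0x80;
            }
        }
        return -1;
    }

    /* Set (@state != 0) or clear (@state == 0) bit @bit, same MSB-first layout. */
    static void bitmap_setbit(unsigned char *bitmap, uint32_t bit, int state)
    {
        unsigned char bitmask = 0x80 >> (bit % 8);

        if (state)
            bitmap[bit / 8] |= bitmask;
        else
            bitmap[bit / 8] &= ~bitmask;
    }

    int main(void)
    {
        unsigned char map[4] = { 0 }; /* 32 bits, all clear */

        bitmap_setbit(map, 10, 1);
        printf("first set bit: %d\n", bitmap_walk(map, 32, 0, 1));       /* 10 */
        printf("first clear bit after 10: %d\n", bitmap_walk(map, 32, 10, 0)); /* 11 */
        return 0;
    }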
730 | /* | 943 | /* |
731 | * LSM Functions | 944 | * LSM Functions |
732 | */ | 945 | */ |
@@ -774,7 +987,7 @@ int netlbl_sock_setattr(struct sock *sk, | |||
774 | struct netlbl_dom_map *dom_entry; | 987 | struct netlbl_dom_map *dom_entry; |
775 | 988 | ||
776 | rcu_read_lock(); | 989 | rcu_read_lock(); |
777 | dom_entry = netlbl_domhsh_getentry(secattr->domain); | 990 | dom_entry = netlbl_domhsh_getentry(secattr->domain, family); |
778 | if (dom_entry == NULL) { | 991 | if (dom_entry == NULL) { |
779 | ret_val = -ENOENT; | 992 | ret_val = -ENOENT; |
780 | goto socket_setattr_return; | 993 | goto socket_setattr_return; |
@@ -799,9 +1012,21 @@ int netlbl_sock_setattr(struct sock *sk, | |||
799 | break; | 1012 | break; |
800 | #if IS_ENABLED(CONFIG_IPV6) | 1013 | #if IS_ENABLED(CONFIG_IPV6) |
801 | case AF_INET6: | 1014 | case AF_INET6: |
802 | /* since we don't support any IPv6 labeling protocols right | 1015 | switch (dom_entry->def.type) { |
803 | * now we can optimize everything away until we do */ | 1016 | case NETLBL_NLTYPE_ADDRSELECT: |
804 | ret_val = 0; | 1017 | ret_val = -EDESTADDRREQ; |
1018 | break; | ||
1019 | case NETLBL_NLTYPE_CALIPSO: | ||
1020 | ret_val = calipso_sock_setattr(sk, | ||
1021 | dom_entry->def.calipso, | ||
1022 | secattr); | ||
1023 | break; | ||
1024 | case NETLBL_NLTYPE_UNLABELED: | ||
1025 | ret_val = 0; | ||
1026 | break; | ||
1027 | default: | ||
1028 | ret_val = -ENOENT; | ||
1029 | } | ||
805 | break; | 1030 | break; |
806 | #endif /* IPv6 */ | 1031 | #endif /* IPv6 */ |
807 | default: | 1032 | default: |
@@ -824,7 +1049,16 @@ socket_setattr_return: | |||
824 | */ | 1049 | */ |
825 | void netlbl_sock_delattr(struct sock *sk) | 1050 | void netlbl_sock_delattr(struct sock *sk) |
826 | { | 1051 | { |
827 | cipso_v4_sock_delattr(sk); | 1052 | switch (sk->sk_family) { |
1053 | case AF_INET: | ||
1054 | cipso_v4_sock_delattr(sk); | ||
1055 | break; | ||
1056 | #if IS_ENABLED(CONFIG_IPV6) | ||
1057 | case AF_INET6: | ||
1058 | calipso_sock_delattr(sk); | ||
1059 | break; | ||
1060 | #endif /* IPv6 */ | ||
1061 | } | ||
828 | } | 1062 | } |
829 | 1063 | ||
830 | /** | 1064 | /** |
@@ -850,7 +1084,7 @@ int netlbl_sock_getattr(struct sock *sk, | |||
850 | break; | 1084 | break; |
851 | #if IS_ENABLED(CONFIG_IPV6) | 1085 | #if IS_ENABLED(CONFIG_IPV6) |
852 | case AF_INET6: | 1086 | case AF_INET6: |
853 | ret_val = -ENOMSG; | 1087 | ret_val = calipso_sock_getattr(sk, secattr); |
854 | break; | 1088 | break; |
855 | #endif /* IPv6 */ | 1089 | #endif /* IPv6 */ |
856 | default: | 1090 | default: |
@@ -878,6 +1112,9 @@ int netlbl_conn_setattr(struct sock *sk, | |||
878 | { | 1112 | { |
879 | int ret_val; | 1113 | int ret_val; |
880 | struct sockaddr_in *addr4; | 1114 | struct sockaddr_in *addr4; |
1115 | #if IS_ENABLED(CONFIG_IPV6) | ||
1116 | struct sockaddr_in6 *addr6; | ||
1117 | #endif | ||
881 | struct netlbl_dommap_def *entry; | 1118 | struct netlbl_dommap_def *entry; |
882 | 1119 | ||
883 | rcu_read_lock(); | 1120 | rcu_read_lock(); |
@@ -898,7 +1135,7 @@ int netlbl_conn_setattr(struct sock *sk, | |||
898 | case NETLBL_NLTYPE_UNLABELED: | 1135 | case NETLBL_NLTYPE_UNLABELED: |
899 | /* just delete the protocols we support for right now | 1136 | /* just delete the protocols we support for right now |
900 | * but we could remove other protocols if needed */ | 1137 | * but we could remove other protocols if needed */ |
901 | cipso_v4_sock_delattr(sk); | 1138 | netlbl_sock_delattr(sk); |
902 | ret_val = 0; | 1139 | ret_val = 0; |
903 | break; | 1140 | break; |
904 | default: | 1141 | default: |
@@ -907,9 +1144,27 @@ int netlbl_conn_setattr(struct sock *sk, | |||
907 | break; | 1144 | break; |
908 | #if IS_ENABLED(CONFIG_IPV6) | 1145 | #if IS_ENABLED(CONFIG_IPV6) |
909 | case AF_INET6: | 1146 | case AF_INET6: |
910 | /* since we don't support any IPv6 labeling protocols right | 1147 | addr6 = (struct sockaddr_in6 *)addr; |
911 | * now we can optimize everything away until we do */ | 1148 | entry = netlbl_domhsh_getentry_af6(secattr->domain, |
912 | ret_val = 0; | 1149 | &addr6->sin6_addr); |
1150 | if (entry == NULL) { | ||
1151 | ret_val = -ENOENT; | ||
1152 | goto conn_setattr_return; | ||
1153 | } | ||
1154 | switch (entry->type) { | ||
1155 | case NETLBL_NLTYPE_CALIPSO: | ||
1156 | ret_val = calipso_sock_setattr(sk, | ||
1157 | entry->calipso, secattr); | ||
1158 | break; | ||
1159 | case NETLBL_NLTYPE_UNLABELED: | ||
1160 | /* just delete the protocols we support for right now | ||
1161 | * but we could remove other protocols if needed */ | ||
1162 | netlbl_sock_delattr(sk); | ||
1163 | ret_val = 0; | ||
1164 | break; | ||
1165 | default: | ||
1166 | ret_val = -ENOENT; | ||
1167 | } | ||
913 | break; | 1168 | break; |
914 | #endif /* IPv6 */ | 1169 | #endif /* IPv6 */ |
915 | default: | 1170 | default: |
@@ -936,12 +1191,13 @@ int netlbl_req_setattr(struct request_sock *req, | |||
936 | { | 1191 | { |
937 | int ret_val; | 1192 | int ret_val; |
938 | struct netlbl_dommap_def *entry; | 1193 | struct netlbl_dommap_def *entry; |
1194 | struct inet_request_sock *ireq = inet_rsk(req); | ||
939 | 1195 | ||
940 | rcu_read_lock(); | 1196 | rcu_read_lock(); |
941 | switch (req->rsk_ops->family) { | 1197 | switch (req->rsk_ops->family) { |
942 | case AF_INET: | 1198 | case AF_INET: |
943 | entry = netlbl_domhsh_getentry_af4(secattr->domain, | 1199 | entry = netlbl_domhsh_getentry_af4(secattr->domain, |
944 | inet_rsk(req)->ir_rmt_addr); | 1200 | ireq->ir_rmt_addr); |
945 | if (entry == NULL) { | 1201 | if (entry == NULL) { |
946 | ret_val = -ENOENT; | 1202 | ret_val = -ENOENT; |
947 | goto req_setattr_return; | 1203 | goto req_setattr_return; |
@@ -952,9 +1208,7 @@ int netlbl_req_setattr(struct request_sock *req, | |||
952 | entry->cipso, secattr); | 1208 | entry->cipso, secattr); |
953 | break; | 1209 | break; |
954 | case NETLBL_NLTYPE_UNLABELED: | 1210 | case NETLBL_NLTYPE_UNLABELED: |
955 | /* just delete the protocols we support for right now | 1211 | netlbl_req_delattr(req); |
956 | * but we could remove other protocols if needed */ | ||
957 | cipso_v4_req_delattr(req); | ||
958 | ret_val = 0; | 1212 | ret_val = 0; |
959 | break; | 1213 | break; |
960 | default: | 1214 | default: |
@@ -963,9 +1217,24 @@ int netlbl_req_setattr(struct request_sock *req, | |||
963 | break; | 1217 | break; |
964 | #if IS_ENABLED(CONFIG_IPV6) | 1218 | #if IS_ENABLED(CONFIG_IPV6) |
965 | case AF_INET6: | 1219 | case AF_INET6: |
966 | /* since we don't support any IPv6 labeling protocols right | 1220 | entry = netlbl_domhsh_getentry_af6(secattr->domain, |
967 | * now we can optimize everything away until we do */ | 1221 | &ireq->ir_v6_rmt_addr); |
968 | ret_val = 0; | 1222 | if (entry == NULL) { |
1223 | ret_val = -ENOENT; | ||
1224 | goto req_setattr_return; | ||
1225 | } | ||
1226 | switch (entry->type) { | ||
1227 | case NETLBL_NLTYPE_CALIPSO: | ||
1228 | ret_val = calipso_req_setattr(req, | ||
1229 | entry->calipso, secattr); | ||
1230 | break; | ||
1231 | case NETLBL_NLTYPE_UNLABELED: | ||
1232 | netlbl_req_delattr(req); | ||
1233 | ret_val = 0; | ||
1234 | break; | ||
1235 | default: | ||
1236 | ret_val = -ENOENT; | ||
1237 | } | ||
969 | break; | 1238 | break; |
970 | #endif /* IPv6 */ | 1239 | #endif /* IPv6 */ |
971 | default: | 1240 | default: |
@@ -987,7 +1256,16 @@ req_setattr_return: | |||
987 | */ | 1256 | */ |
988 | void netlbl_req_delattr(struct request_sock *req) | 1257 | void netlbl_req_delattr(struct request_sock *req) |
989 | { | 1258 | { |
990 | cipso_v4_req_delattr(req); | 1259 | switch (req->rsk_ops->family) { |
1260 | case AF_INET: | ||
1261 | cipso_v4_req_delattr(req); | ||
1262 | break; | ||
1263 | #if IS_ENABLED(CONFIG_IPV6) | ||
1264 | case AF_INET6: | ||
1265 | calipso_req_delattr(req); | ||
1266 | break; | ||
1267 | #endif /* IPv6 */ | ||
1268 | } | ||
991 | } | 1269 | } |
992 | 1270 | ||
993 | /** | 1271 | /** |
@@ -1007,13 +1285,17 @@ int netlbl_skbuff_setattr(struct sk_buff *skb, | |||
1007 | { | 1285 | { |
1008 | int ret_val; | 1286 | int ret_val; |
1009 | struct iphdr *hdr4; | 1287 | struct iphdr *hdr4; |
1288 | #if IS_ENABLED(CONFIG_IPV6) | ||
1289 | struct ipv6hdr *hdr6; | ||
1290 | #endif | ||
1010 | struct netlbl_dommap_def *entry; | 1291 | struct netlbl_dommap_def *entry; |
1011 | 1292 | ||
1012 | rcu_read_lock(); | 1293 | rcu_read_lock(); |
1013 | switch (family) { | 1294 | switch (family) { |
1014 | case AF_INET: | 1295 | case AF_INET: |
1015 | hdr4 = ip_hdr(skb); | 1296 | hdr4 = ip_hdr(skb); |
1016 | entry = netlbl_domhsh_getentry_af4(secattr->domain,hdr4->daddr); | 1297 | entry = netlbl_domhsh_getentry_af4(secattr->domain, |
1298 | hdr4->daddr); | ||
1017 | if (entry == NULL) { | 1299 | if (entry == NULL) { |
1018 | ret_val = -ENOENT; | 1300 | ret_val = -ENOENT; |
1019 | goto skbuff_setattr_return; | 1301 | goto skbuff_setattr_return; |
@@ -1034,9 +1316,26 @@ int netlbl_skbuff_setattr(struct sk_buff *skb, | |||
1034 | break; | 1316 | break; |
1035 | #if IS_ENABLED(CONFIG_IPV6) | 1317 | #if IS_ENABLED(CONFIG_IPV6) |
1036 | case AF_INET6: | 1318 | case AF_INET6: |
1037 | /* since we don't support any IPv6 labeling protocols right | 1319 | hdr6 = ipv6_hdr(skb); |
1038 | * now we can optimize everything away until we do */ | 1320 | entry = netlbl_domhsh_getentry_af6(secattr->domain, |
1039 | ret_val = 0; | 1321 | &hdr6->daddr); |
1322 | if (entry == NULL) { | ||
1323 | ret_val = -ENOENT; | ||
1324 | goto skbuff_setattr_return; | ||
1325 | } | ||
1326 | switch (entry->type) { | ||
1327 | case NETLBL_NLTYPE_CALIPSO: | ||
1328 | ret_val = calipso_skbuff_setattr(skb, entry->calipso, | ||
1329 | secattr); | ||
1330 | break; | ||
1331 | case NETLBL_NLTYPE_UNLABELED: | ||
1332 | /* just delete the protocols we support for right now | ||
1333 | * but we could remove other protocols if needed */ | ||
1334 | ret_val = calipso_skbuff_delattr(skb); | ||
1335 | break; | ||
1336 | default: | ||
1337 | ret_val = -ENOENT; | ||
1338 | } | ||
1040 | break; | 1339 | break; |
1041 | #endif /* IPv6 */ | 1340 | #endif /* IPv6 */ |
1042 | default: | 1341 | default: |
@@ -1075,6 +1374,9 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb, | |||
1075 | break; | 1374 | break; |
1076 | #if IS_ENABLED(CONFIG_IPV6) | 1375 | #if IS_ENABLED(CONFIG_IPV6) |
1077 | case AF_INET6: | 1376 | case AF_INET6: |
1377 | ptr = calipso_optptr(skb); | ||
1378 | if (ptr && calipso_getattr(ptr, secattr) == 0) | ||
1379 | return 0; | ||
1078 | break; | 1380 | break; |
1079 | #endif /* IPv6 */ | 1381 | #endif /* IPv6 */ |
1080 | } | 1382 | } |
@@ -1085,6 +1387,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb, | |||
1085 | /** | 1387 | /** |
1086 | * netlbl_skbuff_err - Handle a LSM error on a sk_buff | 1388 | * netlbl_skbuff_err - Handle a LSM error on a sk_buff |
1087 | * @skb: the packet | 1389 | * @skb: the packet |
1390 | * @family: the family | ||
1088 | * @error: the error code | 1391 | * @error: the error code |
1089 | * @gateway: true if host is acting as a gateway, false otherwise | 1392 | * @gateway: true if host is acting as a gateway, false otherwise |
1090 | * | 1393 | * |
@@ -1094,10 +1397,14 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb, | |||
1094 | * according to the packet's labeling protocol. | 1397 | * according to the packet's labeling protocol. |
1095 | * | 1398 | * |
1096 | */ | 1399 | */ |
1097 | void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway) | 1400 | void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway) |
1098 | { | 1401 | { |
1099 | if (cipso_v4_optptr(skb)) | 1402 | switch (family) { |
1100 | cipso_v4_error(skb, error, gateway); | 1403 | case AF_INET: |
1404 | if (cipso_v4_optptr(skb)) | ||
1405 | cipso_v4_error(skb, error, gateway); | ||
1406 | break; | ||
1407 | } | ||
1101 | } | 1408 | } |
1102 | 1409 | ||
1103 | /** | 1410 | /** |
@@ -1112,11 +1419,15 @@ void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway) | |||
1112 | void netlbl_cache_invalidate(void) | 1419 | void netlbl_cache_invalidate(void) |
1113 | { | 1420 | { |
1114 | cipso_v4_cache_invalidate(); | 1421 | cipso_v4_cache_invalidate(); |
1422 | #if IS_ENABLED(CONFIG_IPV6) | ||
1423 | calipso_cache_invalidate(); | ||
1424 | #endif /* IPv6 */ | ||
1115 | } | 1425 | } |
1116 | 1426 | ||
1117 | /** | 1427 | /** |
1118 | * netlbl_cache_add - Add an entry to a NetLabel protocol cache | 1428 | * netlbl_cache_add - Add an entry to a NetLabel protocol cache |
1119 | * @skb: the packet | 1429 | * @skb: the packet |
1430 | * @family: the family | ||
1120 | * @secattr: the packet's security attributes | 1431 | * @secattr: the packet's security attributes |
1121 | * | 1432 | * |
1122 | * Description: | 1433 | * Description: |
@@ -1125,7 +1436,7 @@ void netlbl_cache_invalidate(void) | |||
1125 | * values on error. | 1436 | * values on error. |
1126 | * | 1437 | * |
1127 | */ | 1438 | */ |
1128 | int netlbl_cache_add(const struct sk_buff *skb, | 1439 | int netlbl_cache_add(const struct sk_buff *skb, u16 family, |
1129 | const struct netlbl_lsm_secattr *secattr) | 1440 | const struct netlbl_lsm_secattr *secattr) |
1130 | { | 1441 | { |
1131 | unsigned char *ptr; | 1442 | unsigned char *ptr; |
@@ -1133,10 +1444,20 @@ int netlbl_cache_add(const struct sk_buff *skb, | |||
1133 | if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0) | 1444 | if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0) |
1134 | return -ENOMSG; | 1445 | return -ENOMSG; |
1135 | 1446 | ||
1136 | ptr = cipso_v4_optptr(skb); | 1447 | switch (family) { |
1137 | if (ptr) | 1448 | case AF_INET: |
1138 | return cipso_v4_cache_add(ptr, secattr); | 1449 | ptr = cipso_v4_optptr(skb); |
1139 | 1450 | if (ptr) | |
1451 | return cipso_v4_cache_add(ptr, secattr); | ||
1452 | break; | ||
1453 | #if IS_ENABLED(CONFIG_IPV6) | ||
1454 | case AF_INET6: | ||
1455 | ptr = calipso_optptr(skb); | ||
1456 | if (ptr) | ||
1457 | return calipso_cache_add(ptr, secattr); | ||
1458 | break; | ||
1459 | #endif /* IPv6 */ | ||
1460 | } | ||
1140 | return -ENOMSG; | 1461 | return -ENOMSG; |
1141 | } | 1462 | } |
1142 | 1463 | ||
@@ -1161,6 +1482,7 @@ struct audit_buffer *netlbl_audit_start(int type, | |||
1161 | { | 1482 | { |
1162 | return netlbl_audit_start_common(type, audit_info); | 1483 | return netlbl_audit_start_common(type, audit_info); |
1163 | } | 1484 | } |
1485 | EXPORT_SYMBOL(netlbl_audit_start); | ||
1164 | 1486 | ||
1165 | /* | 1487 | /* |
1166 | * Setup Functions | 1488 | * Setup Functions |
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 13f777f20995..f85d0e07af2d 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c | |||
@@ -41,8 +41,10 @@ | |||
41 | #include <net/ipv6.h> | 41 | #include <net/ipv6.h> |
42 | #include <net/netlabel.h> | 42 | #include <net/netlabel.h> |
43 | #include <net/cipso_ipv4.h> | 43 | #include <net/cipso_ipv4.h> |
44 | #include <net/calipso.h> | ||
44 | #include <linux/atomic.h> | 45 | #include <linux/atomic.h> |
45 | 46 | ||
47 | #include "netlabel_calipso.h" | ||
46 | #include "netlabel_domainhash.h" | 48 | #include "netlabel_domainhash.h" |
47 | #include "netlabel_user.h" | 49 | #include "netlabel_user.h" |
48 | #include "netlabel_mgmt.h" | 50 | #include "netlabel_mgmt.h" |
@@ -72,6 +74,8 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = { | |||
72 | [NLBL_MGMT_A_PROTOCOL] = { .type = NLA_U32 }, | 74 | [NLBL_MGMT_A_PROTOCOL] = { .type = NLA_U32 }, |
73 | [NLBL_MGMT_A_VERSION] = { .type = NLA_U32 }, | 75 | [NLBL_MGMT_A_VERSION] = { .type = NLA_U32 }, |
74 | [NLBL_MGMT_A_CV4DOI] = { .type = NLA_U32 }, | 76 | [NLBL_MGMT_A_CV4DOI] = { .type = NLA_U32 }, |
77 | [NLBL_MGMT_A_FAMILY] = { .type = NLA_U16 }, | ||
78 | [NLBL_MGMT_A_CLPDOI] = { .type = NLA_U32 }, | ||
75 | }; | 79 | }; |
76 | 80 | ||
77 | /* | 81 | /* |
@@ -95,6 +99,9 @@ static int netlbl_mgmt_add_common(struct genl_info *info, | |||
95 | int ret_val = -EINVAL; | 99 | int ret_val = -EINVAL; |
96 | struct netlbl_domaddr_map *addrmap = NULL; | 100 | struct netlbl_domaddr_map *addrmap = NULL; |
97 | struct cipso_v4_doi *cipsov4 = NULL; | 101 | struct cipso_v4_doi *cipsov4 = NULL; |
102 | #if IS_ENABLED(CONFIG_IPV6) | ||
103 | struct calipso_doi *calipso = NULL; | ||
104 | #endif | ||
98 | u32 tmp_val; | 105 | u32 tmp_val; |
99 | struct netlbl_dom_map *entry = kzalloc(sizeof(*entry), GFP_KERNEL); | 106 | struct netlbl_dom_map *entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
100 | 107 | ||
@@ -119,6 +126,11 @@ static int netlbl_mgmt_add_common(struct genl_info *info, | |||
119 | 126 | ||
120 | switch (entry->def.type) { | 127 | switch (entry->def.type) { |
121 | case NETLBL_NLTYPE_UNLABELED: | 128 | case NETLBL_NLTYPE_UNLABELED: |
129 | if (info->attrs[NLBL_MGMT_A_FAMILY]) | ||
130 | entry->family = | ||
131 | nla_get_u16(info->attrs[NLBL_MGMT_A_FAMILY]); | ||
132 | else | ||
133 | entry->family = AF_UNSPEC; | ||
122 | break; | 134 | break; |
123 | case NETLBL_NLTYPE_CIPSOV4: | 135 | case NETLBL_NLTYPE_CIPSOV4: |
124 | if (!info->attrs[NLBL_MGMT_A_CV4DOI]) | 136 | if (!info->attrs[NLBL_MGMT_A_CV4DOI]) |
@@ -128,12 +140,30 @@ static int netlbl_mgmt_add_common(struct genl_info *info, | |||
128 | cipsov4 = cipso_v4_doi_getdef(tmp_val); | 140 | cipsov4 = cipso_v4_doi_getdef(tmp_val); |
129 | if (cipsov4 == NULL) | 141 | if (cipsov4 == NULL) |
130 | goto add_free_domain; | 142 | goto add_free_domain; |
143 | entry->family = AF_INET; | ||
131 | entry->def.cipso = cipsov4; | 144 | entry->def.cipso = cipsov4; |
132 | break; | 145 | break; |
146 | #if IS_ENABLED(CONFIG_IPV6) | ||
147 | case NETLBL_NLTYPE_CALIPSO: | ||
148 | if (!info->attrs[NLBL_MGMT_A_CLPDOI]) | ||
149 | goto add_free_domain; | ||
150 | |||
151 | tmp_val = nla_get_u32(info->attrs[NLBL_MGMT_A_CLPDOI]); | ||
152 | calipso = calipso_doi_getdef(tmp_val); | ||
153 | if (calipso == NULL) | ||
154 | goto add_free_domain; | ||
155 | entry->family = AF_INET6; | ||
156 | entry->def.calipso = calipso; | ||
157 | break; | ||
158 | #endif /* IPv6 */ | ||
133 | default: | 159 | default: |
134 | goto add_free_domain; | 160 | goto add_free_domain; |
135 | } | 161 | } |
136 | 162 | ||
163 | if ((entry->family == AF_INET && info->attrs[NLBL_MGMT_A_IPV6ADDR]) || | ||
164 | (entry->family == AF_INET6 && info->attrs[NLBL_MGMT_A_IPV4ADDR])) | ||
165 | goto add_doi_put_def; | ||
166 | |||
137 | if (info->attrs[NLBL_MGMT_A_IPV4ADDR]) { | 167 | if (info->attrs[NLBL_MGMT_A_IPV4ADDR]) { |
138 | struct in_addr *addr; | 168 | struct in_addr *addr; |
139 | struct in_addr *mask; | 169 | struct in_addr *mask; |
@@ -178,6 +208,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info, | |||
178 | goto add_free_addrmap; | 208 | goto add_free_addrmap; |
179 | } | 209 | } |
180 | 210 | ||
211 | entry->family = AF_INET; | ||
181 | entry->def.type = NETLBL_NLTYPE_ADDRSELECT; | 212 | entry->def.type = NETLBL_NLTYPE_ADDRSELECT; |
182 | entry->def.addrsel = addrmap; | 213 | entry->def.addrsel = addrmap; |
183 | #if IS_ENABLED(CONFIG_IPV6) | 214 | #if IS_ENABLED(CONFIG_IPV6) |
@@ -220,6 +251,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info, | |||
220 | map->list.mask = *mask; | 251 | map->list.mask = *mask; |
221 | map->list.valid = 1; | 252 | map->list.valid = 1; |
222 | map->def.type = entry->def.type; | 253 | map->def.type = entry->def.type; |
254 | if (calipso) | ||
255 | map->def.calipso = calipso; | ||
223 | 256 | ||
224 | ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); | 257 | ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); |
225 | if (ret_val != 0) { | 258 | if (ret_val != 0) { |
@@ -227,6 +260,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info, | |||
227 | goto add_free_addrmap; | 260 | goto add_free_addrmap; |
228 | } | 261 | } |
229 | 262 | ||
263 | entry->family = AF_INET6; | ||
230 | entry->def.type = NETLBL_NLTYPE_ADDRSELECT; | 264 | entry->def.type = NETLBL_NLTYPE_ADDRSELECT; |
231 | entry->def.addrsel = addrmap; | 265 | entry->def.addrsel = addrmap; |
232 | #endif /* IPv6 */ | 266 | #endif /* IPv6 */ |
@@ -242,6 +276,9 @@ add_free_addrmap: | |||
242 | kfree(addrmap); | 276 | kfree(addrmap); |
243 | add_doi_put_def: | 277 | add_doi_put_def: |
244 | cipso_v4_doi_putdef(cipsov4); | 278 | cipso_v4_doi_putdef(cipsov4); |
279 | #if IS_ENABLED(CONFIG_IPV6) | ||
280 | calipso_doi_putdef(calipso); | ||
281 | #endif | ||
245 | add_free_domain: | 282 | add_free_domain: |
246 | kfree(entry->domain); | 283 | kfree(entry->domain); |
247 | add_free_entry: | 284 | add_free_entry: |
@@ -278,6 +315,10 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb, | |||
278 | return ret_val; | 315 | return ret_val; |
279 | } | 316 | } |
280 | 317 | ||
318 | ret_val = nla_put_u16(skb, NLBL_MGMT_A_FAMILY, entry->family); | ||
319 | if (ret_val != 0) | ||
320 | return ret_val; | ||
321 | |||
281 | switch (entry->def.type) { | 322 | switch (entry->def.type) { |
282 | case NETLBL_NLTYPE_ADDRSELECT: | 323 | case NETLBL_NLTYPE_ADDRSELECT: |
283 | nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST); | 324 | nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST); |
@@ -340,6 +381,15 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb, | |||
340 | if (ret_val != 0) | 381 | if (ret_val != 0) |
341 | return ret_val; | 382 | return ret_val; |
342 | 383 | ||
384 | switch (map6->def.type) { | ||
385 | case NETLBL_NLTYPE_CALIPSO: | ||
386 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_CLPDOI, | ||
387 | map6->def.calipso->doi); | ||
388 | if (ret_val != 0) | ||
389 | return ret_val; | ||
390 | break; | ||
391 | } | ||
392 | |||
343 | nla_nest_end(skb, nla_b); | 393 | nla_nest_end(skb, nla_b); |
344 | } | 394 | } |
345 | #endif /* IPv6 */ | 395 | #endif /* IPv6 */ |
@@ -347,15 +397,25 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb, | |||
347 | nla_nest_end(skb, nla_a); | 397 | nla_nest_end(skb, nla_a); |
348 | break; | 398 | break; |
349 | case NETLBL_NLTYPE_UNLABELED: | 399 | case NETLBL_NLTYPE_UNLABELED: |
350 | ret_val = nla_put_u32(skb,NLBL_MGMT_A_PROTOCOL,entry->def.type); | 400 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, |
401 | entry->def.type); | ||
351 | break; | 402 | break; |
352 | case NETLBL_NLTYPE_CIPSOV4: | 403 | case NETLBL_NLTYPE_CIPSOV4: |
353 | ret_val = nla_put_u32(skb,NLBL_MGMT_A_PROTOCOL,entry->def.type); | 404 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, |
405 | entry->def.type); | ||
354 | if (ret_val != 0) | 406 | if (ret_val != 0) |
355 | return ret_val; | 407 | return ret_val; |
356 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI, | 408 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI, |
357 | entry->def.cipso->doi); | 409 | entry->def.cipso->doi); |
358 | break; | 410 | break; |
411 | case NETLBL_NLTYPE_CALIPSO: | ||
412 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, | ||
413 | entry->def.type); | ||
414 | if (ret_val != 0) | ||
415 | return ret_val; | ||
416 | ret_val = nla_put_u32(skb, NLBL_MGMT_A_CLPDOI, | ||
417 | entry->def.calipso->doi); | ||
418 | break; | ||
359 | } | 419 | } |
360 | 420 | ||
361 | return ret_val; | 421 | return ret_val; |
@@ -418,7 +478,7 @@ static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info) | |||
418 | netlbl_netlink_auditinfo(skb, &audit_info); | 478 | netlbl_netlink_auditinfo(skb, &audit_info); |
419 | 479 | ||
420 | domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]); | 480 | domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]); |
421 | return netlbl_domhsh_remove(domain, &audit_info); | 481 | return netlbl_domhsh_remove(domain, AF_UNSPEC, &audit_info); |
422 | } | 482 | } |
423 | 483 | ||
424 | /** | 484 | /** |
@@ -536,7 +596,7 @@ static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info) | |||
536 | 596 | ||
537 | netlbl_netlink_auditinfo(skb, &audit_info); | 597 | netlbl_netlink_auditinfo(skb, &audit_info); |
538 | 598 | ||
539 | return netlbl_domhsh_remove_default(&audit_info); | 599 | return netlbl_domhsh_remove_default(AF_UNSPEC, &audit_info); |
540 | } | 600 | } |
541 | 601 | ||
542 | /** | 602 | /** |
@@ -556,6 +616,12 @@ static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info) | |||
556 | struct sk_buff *ans_skb = NULL; | 616 | struct sk_buff *ans_skb = NULL; |
557 | void *data; | 617 | void *data; |
558 | struct netlbl_dom_map *entry; | 618 | struct netlbl_dom_map *entry; |
619 | u16 family; | ||
620 | |||
621 | if (info->attrs[NLBL_MGMT_A_FAMILY]) | ||
622 | family = nla_get_u16(info->attrs[NLBL_MGMT_A_FAMILY]); | ||
623 | else | ||
624 | family = AF_INET; | ||
559 | 625 | ||
560 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 626 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
561 | if (ans_skb == NULL) | 627 | if (ans_skb == NULL) |
@@ -566,7 +632,7 @@ static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info) | |||
566 | goto listdef_failure; | 632 | goto listdef_failure; |
567 | 633 | ||
568 | rcu_read_lock(); | 634 | rcu_read_lock(); |
569 | entry = netlbl_domhsh_getentry(NULL); | 635 | entry = netlbl_domhsh_getentry(NULL, family); |
570 | if (entry == NULL) { | 636 | if (entry == NULL) { |
571 | ret_val = -ENOENT; | 637 | ret_val = -ENOENT; |
572 | goto listdef_failure_lock; | 638 | goto listdef_failure_lock; |
@@ -651,6 +717,15 @@ static int netlbl_mgmt_protocols(struct sk_buff *skb, | |||
651 | goto protocols_return; | 717 | goto protocols_return; |
652 | protos_sent++; | 718 | protos_sent++; |
653 | } | 719 | } |
720 | #if IS_ENABLED(CONFIG_IPV6) | ||
721 | if (protos_sent == 2) { | ||
722 | if (netlbl_mgmt_protocols_cb(skb, | ||
723 | cb, | ||
724 | NETLBL_NLTYPE_CALIPSO) < 0) | ||
725 | goto protocols_return; | ||
726 | protos_sent++; | ||
727 | } | ||
728 | #endif | ||
654 | 729 | ||
655 | protocols_return: | 730 | protocols_return: |
656 | cb->args[0] = protos_sent; | 731 | cb->args[0] = protos_sent; |
diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h index 8b6e1ab62b48..ea01e42bca78 100644 --- a/net/netlabel/netlabel_mgmt.h +++ b/net/netlabel/netlabel_mgmt.h | |||
@@ -58,7 +58,10 @@ | |||
58 | * | 58 | * |
59 | * NLBL_MGMT_A_CV4DOI | 59 | * NLBL_MGMT_A_CV4DOI |
60 | * | 60 | * |
61 | * If using NETLBL_NLTYPE_UNLABELED no other attributes are required. | 61 | * If using NETLBL_NLTYPE_UNLABELED no other attributes are required, |
62 | * however the following attribute may optionally be sent: | ||
63 | * | ||
64 | * NLBL_MGMT_A_FAMILY | ||
62 | * | 65 | * |
63 | * o REMOVE: | 66 | * o REMOVE: |
64 | * Sent by an application to remove a domain mapping from the NetLabel | 67 | * Sent by an application to remove a domain mapping from the NetLabel |
@@ -77,6 +80,7 @@ | |||
77 | * Required attributes: | 80 | * Required attributes: |
78 | * | 81 | * |
79 | * NLBL_MGMT_A_DOMAIN | 82 | * NLBL_MGMT_A_DOMAIN |
83 | * NLBL_MGMT_A_FAMILY | ||
80 | * | 84 | * |
81 | * If the IP address selectors are not used the following attribute is | 85 | * If the IP address selectors are not used the following attribute is |
82 | * required: | 86 | * required: |
@@ -108,7 +112,10 @@ | |||
108 | * | 112 | * |
109 | * NLBL_MGMT_A_CV4DOI | 113 | * NLBL_MGMT_A_CV4DOI |
110 | * | 114 | * |
111 | * If using NETLBL_NLTYPE_UNLABELED no other attributes are required. | 115 | * If using NETLBL_NLTYPE_UNLABELED no other attributes are required, |
116 | * however the following attribute may optionally be sent: | ||
117 | * | ||
118 | * NLBL_MGMT_A_FAMILY | ||
112 | * | 119 | * |
113 | * o REMOVEDEF: | 120 | * o REMOVEDEF: |
114 | * Sent by an application to remove the default domain mapping from the | 121 | * Sent by an application to remove the default domain mapping from the |
@@ -117,13 +124,17 @@ | |||
117 | * o LISTDEF: | 124 | * o LISTDEF: |
118 | * This message can be sent either from an application or by the kernel in | 125 | * This message can be sent either from an application or by the kernel in |
119 | * response to an application generated LISTDEF message. When sent by an | 126 | * response to an application generated LISTDEF message. When sent by an |
120 | * application there is no payload. On success the kernel should send a | 127 | * application there may be an optional payload. |
121 | * response using the following format. | ||
122 | * | 128 | * |
123 | * If the IP address selectors are not used the following attribute is | 129 | * NLBL_MGMT_A_FAMILY |
130 | * | ||
131 | * On success the kernel should send a response using the following format: | ||
132 | * | ||
133 | * If the IP address selectors are not used the following attributes are | ||
124 | * required: | 134 | * required: |
125 | * | 135 | * |
126 | * NLBL_MGMT_A_PROTOCOL | 136 | * NLBL_MGMT_A_PROTOCOL |
137 | * NLBL_MGMT_A_FAMILY | ||
127 | * | 138 | * |
128 | * If the IP address selectors are used then the following attribute is | 139 | * If the IP address selectors are used then the following attribute is |
129 | * required: | 140 | * required: |
@@ -209,6 +220,12 @@ enum { | |||
209 | /* (NLA_NESTED) | 220 | /* (NLA_NESTED) |
210 | * the selector list, there must be at least one | 221 | * the selector list, there must be at least one |
211 | * NLBL_MGMT_A_ADDRSELECTOR attribute */ | 222 | * NLBL_MGMT_A_ADDRSELECTOR attribute */ |
223 | NLBL_MGMT_A_FAMILY, | ||
224 | /* (NLA_U16) | ||
225 | * The address family */ | ||
226 | NLBL_MGMT_A_CLPDOI, | ||
227 | /* (NLA_U32) | ||
228 | * the CALIPSO DOI value */ | ||
212 | __NLBL_MGMT_A_MAX, | 229 | __NLBL_MGMT_A_MAX, |
213 | }; | 230 | }; |
214 | #define NLBL_MGMT_A_MAX (__NLBL_MGMT_A_MAX - 1) | 231 | #define NLBL_MGMT_A_MAX (__NLBL_MGMT_A_MAX - 1) |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 9eaa9a1e8629..4528cff9138b 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -116,8 +116,8 @@ struct netlbl_unlhsh_walk_arg { | |||
116 | static DEFINE_SPINLOCK(netlbl_unlhsh_lock); | 116 | static DEFINE_SPINLOCK(netlbl_unlhsh_lock); |
117 | #define netlbl_unlhsh_rcu_deref(p) \ | 117 | #define netlbl_unlhsh_rcu_deref(p) \ |
118 | rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock)) | 118 | rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock)) |
119 | static struct netlbl_unlhsh_tbl *netlbl_unlhsh; | 119 | static struct netlbl_unlhsh_tbl __rcu *netlbl_unlhsh; |
120 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_def; | 120 | static struct netlbl_unlhsh_iface __rcu *netlbl_unlhsh_def; |
121 | 121 | ||
122 | /* Accept unlabeled packets flag */ | 122 | /* Accept unlabeled packets flag */ |
123 | static u8 netlabel_unlabel_acceptflg; | 123 | static u8 netlabel_unlabel_acceptflg; |
@@ -1537,6 +1537,7 @@ int __init netlbl_unlabel_defconf(void) | |||
1537 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | 1537 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
1538 | if (entry == NULL) | 1538 | if (entry == NULL) |
1539 | return -ENOMEM; | 1539 | return -ENOMEM; |
1540 | entry->family = AF_UNSPEC; | ||
1540 | entry->def.type = NETLBL_NLTYPE_UNLABELED; | 1541 | entry->def.type = NETLBL_NLTYPE_UNLABELED; |
1541 | ret_val = netlbl_domhsh_add_default(entry, &audit_info); | 1542 | ret_val = netlbl_domhsh_add_default(entry, &audit_info); |
1542 | if (ret_val != 0) | 1543 | if (ret_val != 0) |
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c index adf8b7900da2..58495f44c62a 100644 --- a/net/netlabel/netlabel_user.c +++ b/net/netlabel/netlabel_user.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include "netlabel_mgmt.h" | 44 | #include "netlabel_mgmt.h" |
45 | #include "netlabel_unlabeled.h" | 45 | #include "netlabel_unlabeled.h" |
46 | #include "netlabel_cipso_v4.h" | 46 | #include "netlabel_cipso_v4.h" |
47 | #include "netlabel_calipso.h" | ||
47 | #include "netlabel_user.h" | 48 | #include "netlabel_user.h" |
48 | 49 | ||
49 | /* | 50 | /* |
@@ -71,6 +72,10 @@ int __init netlbl_netlink_init(void) | |||
71 | if (ret_val != 0) | 72 | if (ret_val != 0) |
72 | return ret_val; | 73 | return ret_val; |
73 | 74 | ||
75 | ret_val = netlbl_calipso_genl_init(); | ||
76 | if (ret_val != 0) | ||
77 | return ret_val; | ||
78 | |||
74 | return netlbl_unlabel_genl_init(); | 79 | return netlbl_unlabel_genl_init(); |
75 | } | 80 | } |
76 | 81 | ||
diff --git a/net/sysctl_net.c b/net/sysctl_net.c index ed98c1fc3de1..46a71c701e7c 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c | |||
@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head, | |||
46 | kgid_t root_gid = make_kgid(net->user_ns, 0); | 46 | kgid_t root_gid = make_kgid(net->user_ns, 0); |
47 | 47 | ||
48 | /* Allow network administrator to have same access as root. */ | 48 | /* Allow network administrator to have same access as root. */ |
49 | if (ns_capable(net->user_ns, CAP_NET_ADMIN) || | 49 | if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN) || |
50 | uid_eq(root_uid, current_euid())) { | 50 | uid_eq(root_uid, current_euid())) { |
51 | int mode = (table->mode >> 6) & 7; | 51 | int mode = (table->mode >> 6) & 7; |
52 | return (mode << 6) | (mode << 3) | mode; | 52 | return (mode << 6) | (mode << 3) | mode; |
diff --git a/samples/Kconfig b/samples/Kconfig index 27a24571e96c..85c405fcccb0 100644 --- a/samples/Kconfig +++ b/samples/Kconfig | |||
@@ -92,4 +92,11 @@ config SAMPLE_CONNECTOR | |||
92 | with it. | 92 | with it. |
93 | See also Documentation/connector/connector.txt | 93 | See also Documentation/connector/connector.txt |
94 | 94 | ||
95 | config SAMPLE_SECCOMP | ||
96 | tristate "Build seccomp sample code -- loadable modules only" | ||
97 | depends on SECCOMP_FILTER && m | ||
98 | help | ||
99 | Build samples of seccomp filters using various methods of | ||
100 | BPF filter construction. | ||
101 | |||
95 | endif # SAMPLES | 102 | endif # SAMPLES |
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index 1b4e4b8f5e47..ae7ff6f24f36 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | # kbuild trick to avoid linker error. Can be omitted if a module is built. | 1 | # kbuild trick to avoid linker error. Can be omitted if a module is built. |
2 | obj- := dummy.o | 2 | obj- := dummy.o |
3 | 3 | ||
4 | hostprogs-$(CONFIG_SECCOMP_FILTER) := bpf-fancy dropper bpf-direct | 4 | hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct |
5 | 5 | ||
6 | HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include | 6 | HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include |
7 | HOSTCFLAGS_bpf-fancy.o += -idirafter $(objtree)/include | 7 | HOSTCFLAGS_bpf-fancy.o += -idirafter $(objtree)/include |
diff --git a/scripts/sign-file.c b/scripts/sign-file.c index d912d5a56a5e..53af6dc3e6c1 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Sign a module file using the given key. | 1 | /* Sign a module file using the given key. |
2 | * | 2 | * |
3 | * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright © 2014-2016 Red Hat, Inc. All Rights Reserved. |
4 | * Copyright © 2015 Intel Corporation. | 4 | * Copyright © 2015 Intel Corporation. |
5 | * Copyright © 2016 Hewlett Packard Enterprise Development LP | 5 | * Copyright © 2016 Hewlett Packard Enterprise Development LP |
6 | * | 6 | * |
@@ -167,19 +167,37 @@ static EVP_PKEY *read_private_key(const char *private_key_name) | |||
167 | 167 | ||
168 | static X509 *read_x509(const char *x509_name) | 168 | static X509 *read_x509(const char *x509_name) |
169 | { | 169 | { |
170 | unsigned char buf[2]; | ||
170 | X509 *x509; | 171 | X509 *x509; |
171 | BIO *b; | 172 | BIO *b; |
173 | int n; | ||
172 | 174 | ||
173 | b = BIO_new_file(x509_name, "rb"); | 175 | b = BIO_new_file(x509_name, "rb"); |
174 | ERR(!b, "%s", x509_name); | 176 | ERR(!b, "%s", x509_name); |
175 | x509 = d2i_X509_bio(b, NULL); /* Binary encoded X.509 */ | 177 | |
176 | if (!x509) { | 178 | /* Look at the first two bytes of the file to determine the encoding */ |
177 | ERR(BIO_reset(b) != 1, "%s", x509_name); | 179 | n = BIO_read(b, buf, 2); |
178 | x509 = PEM_read_bio_X509(b, NULL, NULL, | 180 | if (n != 2) { |
179 | NULL); /* PEM encoded X.509 */ | 181 | if (BIO_should_retry(b)) { |
180 | if (x509) | 182 | fprintf(stderr, "%s: Read wanted retry\n", x509_name); |
181 | drain_openssl_errors(); | 183 | exit(1); |
184 | } | ||
185 | if (n >= 0) { | ||
186 | fprintf(stderr, "%s: Short read\n", x509_name); | ||
187 | exit(1); | ||
188 | } | ||
189 | ERR(1, "%s", x509_name); | ||
182 | } | 190 | } |
191 | |||
192 | ERR(BIO_reset(b) != 0, "%s", x509_name); | ||
193 | |||
194 | if (buf[0] == 0x30 && buf[1] >= 0x81 && buf[1] <= 0x84) | ||
195 | /* Assume raw DER encoded X.509 */ | ||
196 | x509 = d2i_X509_bio(b, NULL); | ||
197 | else | ||
198 | /* Assume PEM encoded X.509 */ | ||
199 | x509 = PEM_read_bio_X509(b, NULL, NULL, NULL); | ||
200 | |||
183 | BIO_free(b); | 201 | BIO_free(b); |
184 | ERR(!x509, "%s", x509_name); | 202 | ERR(!x509, "%s", x509_name); |
185 | 203 | ||
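The rewritten read_x509() no longer tries DER and silently falls back to PEM; it sniffs the first two bytes, since a DER certificate begins with the ASN.1 SEQUENCE tag 0x30 followed by a long-form length byte (0x81-0x84 for realistically sized certificates). Below is a standalone sketch of the same detection using only the OpenSSL calls visible in the hunk; the error handling is simplified and the program itself is illustrative, not part of sign-file.c.

/* Illustrative sketch of the DER-vs-PEM sniff; simplified error handling.
 * Build: gcc sniff-x509.c -lcrypto
 */
#include <stdio.h>
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>

static X509 *load_x509(const char *path)
{
	unsigned char buf[2];
	X509 *x509 = NULL;
	BIO *b;

	b = BIO_new_file(path, "rb");
	if (!b)
		return NULL;

	/* Peek at the first two bytes, then rewind the BIO. */
	if (BIO_read(b, buf, 2) != 2 || BIO_reset(b) != 0)
		goto out;

	if (buf[0] == 0x30 && buf[1] >= 0x81 && buf[1] <= 0x84)
		x509 = d2i_X509_bio(b, NULL);		/* raw DER */
	else
		x509 = PEM_read_bio_X509(b, NULL, NULL, NULL);	/* PEM */
out:
	BIO_free(b);
	return x509;
}

int main(int argc, char **argv)
{
	X509 *cert = argc > 1 ? load_x509(argv[1]) : NULL;

	printf("%s\n", cert ? "parsed certificate" : "failed");
	if (cert)
		X509_free(cert);
	return cert ? 0 : 1;
}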
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig index 232469baa94f..be5e9414a295 100644 --- a/security/apparmor/Kconfig +++ b/security/apparmor/Kconfig | |||
@@ -31,13 +31,26 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE | |||
31 | If you are unsure how to answer this question, answer 1. | 31 | If you are unsure how to answer this question, answer 1. |
32 | 32 | ||
33 | config SECURITY_APPARMOR_HASH | 33 | config SECURITY_APPARMOR_HASH |
34 | bool "SHA1 hash of loaded profiles" | 34 | bool "Enable introspection of sha1 hashes for loaded profiles" |
35 | depends on SECURITY_APPARMOR | 35 | depends on SECURITY_APPARMOR |
36 | select CRYPTO | 36 | select CRYPTO |
37 | select CRYPTO_SHA1 | 37 | select CRYPTO_SHA1 |
38 | default y | 38 | default y |
39 | 39 | ||
40 | help | 40 | help |
41 | This option selects whether sha1 hashing is done against loaded | 41 | This option selects whether introspection of loaded policy |
42 | profiles and exported for inspection to user space via the apparmor | 42 | is available to userspace via the apparmor filesystem. |
43 | filesystem. | 43 | |
44 | config SECURITY_APPARMOR_HASH_DEFAULT | ||
45 | bool "Enable policy hash introspection by default" | ||
46 | depends on SECURITY_APPARMOR_HASH | ||
47 | default y | ||
48 | |||
49 | help | ||
50 | This option selects whether sha1 hashing of loaded policy | ||
51 | is enabled by default. The generation of sha1 hashes for | ||
52 | loaded policy provides system administrators a quick way | ||
53 | to verify that policy in the kernel matches what is expected; | ||
54 | however, it can slow down policy load on some devices. In | ||
55 | these cases policy hashing can be disabled by default and | ||
56 | enabled only if needed. | ||
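Together with the hash_policy module parameter added in the lsm.c hunk further down, this lets hashing be compiled in but left off by default and flipped at runtime (or on the kernel command line with apparmor.hash_policy=). A small sketch of checking the knob from userspace; the /sys/module path follows the usual module-parameter convention and is an assumption here rather than something stated in the patch.

/* Sketch: query AppArmor's hash_policy knob from userspace.
 * The path is the conventional /sys/module/<mod>/parameters/<name>
 * location implied by module_param_named(); treat it as an assumption.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/apparmor/parameters/hash_policy", "r");
	int c;

	if (!f) {
		perror("hash_policy");	/* older kernel or hashing not built in */
		return 1;
	}
	c = fgetc(f);
	fclose(f);
	printf("policy hashing is %s\n", c == 'Y' ? "enabled" : "disabled");
	return 0;
}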
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index ad4fa49ad1db..729e595119ed 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c | |||
@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v) | |||
331 | seq_printf(seq, "%.2x", profile->hash[i]); | 331 | seq_printf(seq, "%.2x", profile->hash[i]); |
332 | seq_puts(seq, "\n"); | 332 | seq_puts(seq, "\n"); |
333 | } | 333 | } |
334 | aa_put_profile(profile); | ||
334 | 335 | ||
335 | return 0; | 336 | return 0; |
336 | } | 337 | } |
@@ -379,6 +380,8 @@ void __aa_fs_profile_migrate_dents(struct aa_profile *old, | |||
379 | 380 | ||
380 | for (i = 0; i < AAFS_PROF_SIZEOF; i++) { | 381 | for (i = 0; i < AAFS_PROF_SIZEOF; i++) { |
381 | new->dents[i] = old->dents[i]; | 382 | new->dents[i] = old->dents[i]; |
383 | if (new->dents[i]) | ||
384 | new->dents[i]->d_inode->i_mtime = CURRENT_TIME; | ||
382 | old->dents[i] = NULL; | 385 | old->dents[i] = NULL; |
383 | } | 386 | } |
384 | } | 387 | } |
@@ -550,8 +553,6 @@ fail2: | |||
550 | } | 553 | } |
551 | 554 | ||
552 | 555 | ||
553 | #define list_entry_next(pos, member) \ | ||
554 | list_entry(pos->member.next, typeof(*pos), member) | ||
555 | #define list_entry_is_head(pos, head, member) (&pos->member == (head)) | 556 | #define list_entry_is_head(pos, head, member) (&pos->member == (head)) |
556 | 557 | ||
557 | /** | 558 | /** |
@@ -582,7 +583,7 @@ static struct aa_namespace *__next_namespace(struct aa_namespace *root, | |||
582 | parent = ns->parent; | 583 | parent = ns->parent; |
583 | while (ns != root) { | 584 | while (ns != root) { |
584 | mutex_unlock(&ns->lock); | 585 | mutex_unlock(&ns->lock); |
585 | next = list_entry_next(ns, base.list); | 586 | next = list_next_entry(ns, base.list); |
586 | if (!list_entry_is_head(next, &parent->sub_ns, base.list)) { | 587 | if (!list_entry_is_head(next, &parent->sub_ns, base.list)) { |
587 | mutex_lock(&next->lock); | 588 | mutex_lock(&next->lock); |
588 | return next; | 589 | return next; |
@@ -636,7 +637,7 @@ static struct aa_profile *__next_profile(struct aa_profile *p) | |||
636 | parent = rcu_dereference_protected(p->parent, | 637 | parent = rcu_dereference_protected(p->parent, |
637 | mutex_is_locked(&p->ns->lock)); | 638 | mutex_is_locked(&p->ns->lock)); |
638 | while (parent) { | 639 | while (parent) { |
639 | p = list_entry_next(p, base.list); | 640 | p = list_next_entry(p, base.list); |
640 | if (!list_entry_is_head(p, &parent->base.profiles, base.list)) | 641 | if (!list_entry_is_head(p, &parent->base.profiles, base.list)) |
641 | return p; | 642 | return p; |
642 | p = parent; | 643 | p = parent; |
@@ -645,7 +646,7 @@ static struct aa_profile *__next_profile(struct aa_profile *p) | |||
645 | } | 646 | } |
646 | 647 | ||
647 | /* is next another profile in the namespace */ | 648 | /* is next another profile in the namespace */ |
648 | p = list_entry_next(p, base.list); | 649 | p = list_next_entry(p, base.list); |
649 | if (!list_entry_is_head(p, &ns->base.profiles, base.list)) | 650 | if (!list_entry_is_head(p, &ns->base.profiles, base.list)) |
650 | return p; | 651 | return p; |
651 | 652 | ||
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 89c78658031f..3a7f1da1425e 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c | |||
@@ -200,7 +200,8 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, | |||
200 | 200 | ||
201 | if (sa->aad->type == AUDIT_APPARMOR_KILL) | 201 | if (sa->aad->type == AUDIT_APPARMOR_KILL) |
202 | (void)send_sig_info(SIGKILL, NULL, | 202 | (void)send_sig_info(SIGKILL, NULL, |
203 | sa->u.tsk ? sa->u.tsk : current); | 203 | sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ? |
204 | sa->u.tsk : current); | ||
204 | 205 | ||
205 | if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) | 206 | if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) |
206 | return complain_error(sa->aad->error); | 207 | return complain_error(sa->aad->error); |
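The fix only reads sa->u.tsk after confirming the common_audit_data really carries task data; u is a union, so looking at u.tsk when some other member was stored yields garbage at best. The same discipline in a self-contained sketch (the types and names below are stand-ins, not the LSM structures).

/* Illustrative tagged union: check the tag before touching a member. */
#include <stdio.h>

enum data_type { DATA_NONE, DATA_TASK, DATA_PATH };

struct audit_data {
	enum data_type type;
	union {
		const char *task_name;
		const char *path;
	} u;
};

static const char *task_or_default(const struct audit_data *sa,
				   const char *fallback)
{
	/* Mirrors the fix: only trust u.task_name when the tag says so. */
	if (sa->type == DATA_TASK && sa->u.task_name)
		return sa->u.task_name;
	return fallback;
}

int main(void)
{
	struct audit_data sa = { .type = DATA_PATH, .u.path = "/etc/shadow" };

	puts(task_or_default(&sa, "current"));	/* prints "current" */
	return 0;
}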
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c index 532471d0b3a0..b75dab0df1cb 100644 --- a/security/apparmor/crypto.c +++ b/security/apparmor/crypto.c | |||
@@ -39,6 +39,9 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, | |||
39 | int error = -ENOMEM; | 39 | int error = -ENOMEM; |
40 | u32 le32_version = cpu_to_le32(version); | 40 | u32 le32_version = cpu_to_le32(version); |
41 | 41 | ||
42 | if (!aa_g_hash_policy) | ||
43 | return 0; | ||
44 | |||
42 | if (!apparmor_tfm) | 45 | if (!apparmor_tfm) |
43 | return 0; | 46 | return 0; |
44 | 47 | ||
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index dc0027b28b04..fc3036b34e51 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c | |||
@@ -346,7 +346,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
346 | file_inode(bprm->file)->i_uid, | 346 | file_inode(bprm->file)->i_uid, |
347 | file_inode(bprm->file)->i_mode | 347 | file_inode(bprm->file)->i_mode |
348 | }; | 348 | }; |
349 | const char *name = NULL, *target = NULL, *info = NULL; | 349 | const char *name = NULL, *info = NULL; |
350 | int error = 0; | 350 | int error = 0; |
351 | 351 | ||
352 | if (bprm->cred_prepared) | 352 | if (bprm->cred_prepared) |
@@ -399,6 +399,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
399 | if (cxt->onexec) { | 399 | if (cxt->onexec) { |
400 | struct file_perms cp; | 400 | struct file_perms cp; |
401 | info = "change_profile onexec"; | 401 | info = "change_profile onexec"; |
402 | new_profile = aa_get_newest_profile(cxt->onexec); | ||
402 | if (!(perms.allow & AA_MAY_ONEXEC)) | 403 | if (!(perms.allow & AA_MAY_ONEXEC)) |
403 | goto audit; | 404 | goto audit; |
404 | 405 | ||
@@ -413,7 +414,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
413 | 414 | ||
414 | if (!(cp.allow & AA_MAY_ONEXEC)) | 415 | if (!(cp.allow & AA_MAY_ONEXEC)) |
415 | goto audit; | 416 | goto audit; |
416 | new_profile = aa_get_newest_profile(cxt->onexec); | ||
417 | goto apply; | 417 | goto apply; |
418 | } | 418 | } |
419 | 419 | ||
@@ -433,7 +433,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
433 | new_profile = aa_get_newest_profile(ns->unconfined); | 433 | new_profile = aa_get_newest_profile(ns->unconfined); |
434 | info = "ux fallback"; | 434 | info = "ux fallback"; |
435 | } else { | 435 | } else { |
436 | error = -ENOENT; | 436 | error = -EACCES; |
437 | info = "profile not found"; | 437 | info = "profile not found"; |
438 | /* remove MAY_EXEC to audit as failure */ | 438 | /* remove MAY_EXEC to audit as failure */ |
439 | perms.allow &= ~MAY_EXEC; | 439 | perms.allow &= ~MAY_EXEC; |
@@ -445,10 +445,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
445 | if (!new_profile) { | 445 | if (!new_profile) { |
446 | error = -ENOMEM; | 446 | error = -ENOMEM; |
447 | info = "could not create null profile"; | 447 | info = "could not create null profile"; |
448 | } else { | 448 | } else |
449 | error = -EACCES; | 449 | error = -EACCES; |
450 | target = new_profile->base.hname; | ||
451 | } | ||
452 | perms.xindex |= AA_X_UNSAFE; | 450 | perms.xindex |= AA_X_UNSAFE; |
453 | } else | 451 | } else |
454 | /* fail exec */ | 452 | /* fail exec */ |
@@ -459,7 +457,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
459 | * fail the exec. | 457 | * fail the exec. |
460 | */ | 458 | */ |
461 | if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) { | 459 | if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) { |
462 | aa_put_profile(new_profile); | ||
463 | error = -EPERM; | 460 | error = -EPERM; |
464 | goto cleanup; | 461 | goto cleanup; |
465 | } | 462 | } |
@@ -474,10 +471,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
474 | 471 | ||
475 | if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { | 472 | if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { |
476 | error = may_change_ptraced_domain(new_profile); | 473 | error = may_change_ptraced_domain(new_profile); |
477 | if (error) { | 474 | if (error) |
478 | aa_put_profile(new_profile); | ||
479 | goto audit; | 475 | goto audit; |
480 | } | ||
481 | } | 476 | } |
482 | 477 | ||
483 | /* Determine if secure exec is needed. | 478 | /* Determine if secure exec is needed. |
@@ -498,7 +493,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) | |||
498 | bprm->unsafe |= AA_SECURE_X_NEEDED; | 493 | bprm->unsafe |= AA_SECURE_X_NEEDED; |
499 | } | 494 | } |
500 | apply: | 495 | apply: |
501 | target = new_profile->base.hname; | ||
502 | /* when transitioning profiles clear unsafe personality bits */ | 496 | /* when transitioning profiles clear unsafe personality bits */ |
503 | bprm->per_clear |= PER_CLEAR_ON_SETID; | 497 | bprm->per_clear |= PER_CLEAR_ON_SETID; |
504 | 498 | ||
@@ -506,15 +500,19 @@ x_clear: | |||
506 | aa_put_profile(cxt->profile); | 500 | aa_put_profile(cxt->profile); |
507 | /* transfer new profile reference will be released when cxt is freed */ | 501 | /* transfer new profile reference will be released when cxt is freed */ |
508 | cxt->profile = new_profile; | 502 | cxt->profile = new_profile; |
503 | new_profile = NULL; | ||
509 | 504 | ||
510 | /* clear out all temporary/transitional state from the context */ | 505 | /* clear out all temporary/transitional state from the context */ |
511 | aa_clear_task_cxt_trans(cxt); | 506 | aa_clear_task_cxt_trans(cxt); |
512 | 507 | ||
513 | audit: | 508 | audit: |
514 | error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC, | 509 | error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC, |
515 | name, target, cond.uid, info, error); | 510 | name, |
511 | new_profile ? new_profile->base.hname : NULL, | ||
512 | cond.uid, info, error); | ||
516 | 513 | ||
517 | cleanup: | 514 | cleanup: |
515 | aa_put_profile(new_profile); | ||
518 | aa_put_profile(profile); | 516 | aa_put_profile(profile); |
519 | kfree(buffer); | 517 | kfree(buffer); |
520 | 518 | ||
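The apparmor_bprm_set_creds() changes take the new_profile reference as soon as the transition target is known and drop it exactly once on the shared cleanup path, instead of sprinkling aa_put_profile() over the early error exits; the audit call then reads the target name straight from new_profile, so the separate target pointer disappears. A generic sketch of that acquire-early, release-once ownership pattern (the refcount type and helpers are illustrative, not kernel code):

/* Illustrative acquire-early, release-once reference pattern. */
#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; const char *name; };

static struct obj *obj_get(struct obj *o) { if (o) o->refs++; return o; }
static void obj_put(struct obj *o)
{
	if (o && --o->refs == 0)
		free(o);
}

static struct obj *ctx;	/* long-lived owner, standing in for cxt->profile */

static int transition(struct obj *next, int allowed)
{
	struct obj *held = obj_get(next);	/* take the reference once */
	int err = 0;

	if (!allowed) {
		err = -1;
		goto audit;		/* no put here: the exit path owns it */
	}
	obj_put(ctx);
	ctx = held;			/* ownership handed to ctx */
	held = NULL;			/* so the common put below is a no-op */

audit:
	printf("audit target=%s err=%d\n", held ? held->name : "(none)", err);
	obj_put(held);			/* single, NULL-safe release point */
	return err;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refs = 1;
	o->name = "profile";
	transition(o, 0);	/* denied: the exit path drops the reference */
	transition(o, 1);	/* applied: ctx keeps the reference */
	obj_put(ctx);
	obj_put(o);		/* main's original reference */
	return 0;
}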
diff --git a/security/apparmor/file.c b/security/apparmor/file.c index d186674f973a..4d2af4b01033 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c | |||
@@ -110,7 +110,8 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, | |||
110 | int type = AUDIT_APPARMOR_AUTO; | 110 | int type = AUDIT_APPARMOR_AUTO; |
111 | struct common_audit_data sa; | 111 | struct common_audit_data sa; |
112 | struct apparmor_audit_data aad = {0,}; | 112 | struct apparmor_audit_data aad = {0,}; |
113 | sa.type = LSM_AUDIT_DATA_NONE; | 113 | sa.type = LSM_AUDIT_DATA_TASK; |
114 | sa.u.tsk = NULL; | ||
114 | sa.aad = &aad; | 115 | sa.aad = &aad; |
115 | aad.op = op, | 116 | aad.op = op, |
116 | aad.fs.request = request; | 117 | aad.fs.request = request; |
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h index e4ea62663866..5d721e990876 100644 --- a/security/apparmor/include/apparmor.h +++ b/security/apparmor/include/apparmor.h | |||
@@ -37,6 +37,7 @@ | |||
37 | extern enum audit_mode aa_g_audit; | 37 | extern enum audit_mode aa_g_audit; |
38 | extern bool aa_g_audit_header; | 38 | extern bool aa_g_audit_header; |
39 | extern bool aa_g_debug; | 39 | extern bool aa_g_debug; |
40 | extern bool aa_g_hash_policy; | ||
40 | extern bool aa_g_lock_policy; | 41 | extern bool aa_g_lock_policy; |
41 | extern bool aa_g_logsyscall; | 42 | extern bool aa_g_logsyscall; |
42 | extern bool aa_g_paranoid_load; | 43 | extern bool aa_g_paranoid_load; |
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h index 001c43aa0406..a1c04fe86790 100644 --- a/security/apparmor/include/match.h +++ b/security/apparmor/include/match.h | |||
@@ -62,6 +62,7 @@ struct table_set_header { | |||
62 | #define YYTD_ID_ACCEPT2 6 | 62 | #define YYTD_ID_ACCEPT2 6 |
63 | #define YYTD_ID_NXT 7 | 63 | #define YYTD_ID_NXT 7 |
64 | #define YYTD_ID_TSIZE 8 | 64 | #define YYTD_ID_TSIZE 8 |
65 | #define YYTD_ID_MAX 8 | ||
65 | 66 | ||
66 | #define YYTD_DATA8 1 | 67 | #define YYTD_DATA8 1 |
67 | #define YYTD_DATA16 2 | 68 | #define YYTD_DATA16 2 |
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index c28b0f20ab53..52275f040a5f 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h | |||
@@ -403,6 +403,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile) | |||
403 | return profile->audit; | 403 | return profile->audit; |
404 | } | 404 | } |
405 | 405 | ||
406 | bool policy_view_capable(void); | ||
407 | bool policy_admin_capable(void); | ||
406 | bool aa_may_manage_policy(int op); | 408 | bool aa_may_manage_policy(int op); |
407 | 409 | ||
408 | #endif /* __AA_POLICY_H */ | 410 | #endif /* __AA_POLICY_H */ |
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 7798e1608f4f..41b8cb115801 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -529,7 +529,7 @@ static int apparmor_setprocattr(struct task_struct *task, char *name, | |||
529 | if (!*args) | 529 | if (!*args) |
530 | goto out; | 530 | goto out; |
531 | 531 | ||
532 | arg_size = size - (args - (char *) value); | 532 | arg_size = size - (args - (largs ? largs : (char *) value)); |
533 | if (strcmp(name, "current") == 0) { | 533 | if (strcmp(name, "current") == 0) { |
534 | if (strcmp(command, "changehat") == 0) { | 534 | if (strcmp(command, "changehat") == 0) { |
535 | error = aa_setprocattr_changehat(args, arg_size, | 535 | error = aa_setprocattr_changehat(args, arg_size, |
@@ -671,6 +671,12 @@ enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE; | |||
671 | module_param_call(mode, param_set_mode, param_get_mode, | 671 | module_param_call(mode, param_set_mode, param_get_mode, |
672 | &aa_g_profile_mode, S_IRUSR | S_IWUSR); | 672 | &aa_g_profile_mode, S_IRUSR | S_IWUSR); |
673 | 673 | ||
674 | #ifdef CONFIG_SECURITY_APPARMOR_HASH | ||
675 | /* whether policy verification hashing is enabled */ | ||
676 | bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT); | ||
677 | module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR); | ||
678 | #endif | ||
679 | |||
674 | /* Debug mode */ | 680 | /* Debug mode */ |
675 | bool aa_g_debug; | 681 | bool aa_g_debug; |
676 | module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); | 682 | module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); |
@@ -728,51 +734,49 @@ __setup("apparmor=", apparmor_enabled_setup); | |||
728 | /* set global flag turning off the ability to load policy */ | 734 | /* set global flag turning off the ability to load policy */ |
729 | static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) | 735 | static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) |
730 | { | 736 | { |
731 | if (!capable(CAP_MAC_ADMIN)) | 737 | if (!policy_admin_capable()) |
732 | return -EPERM; | 738 | return -EPERM; |
733 | if (aa_g_lock_policy) | ||
734 | return -EACCES; | ||
735 | return param_set_bool(val, kp); | 739 | return param_set_bool(val, kp); |
736 | } | 740 | } |
737 | 741 | ||
738 | static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) | 742 | static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) |
739 | { | 743 | { |
740 | if (!capable(CAP_MAC_ADMIN)) | 744 | if (!policy_view_capable()) |
741 | return -EPERM; | 745 | return -EPERM; |
742 | return param_get_bool(buffer, kp); | 746 | return param_get_bool(buffer, kp); |
743 | } | 747 | } |
744 | 748 | ||
745 | static int param_set_aabool(const char *val, const struct kernel_param *kp) | 749 | static int param_set_aabool(const char *val, const struct kernel_param *kp) |
746 | { | 750 | { |
747 | if (!capable(CAP_MAC_ADMIN)) | 751 | if (!policy_admin_capable()) |
748 | return -EPERM; | 752 | return -EPERM; |
749 | return param_set_bool(val, kp); | 753 | return param_set_bool(val, kp); |
750 | } | 754 | } |
751 | 755 | ||
752 | static int param_get_aabool(char *buffer, const struct kernel_param *kp) | 756 | static int param_get_aabool(char *buffer, const struct kernel_param *kp) |
753 | { | 757 | { |
754 | if (!capable(CAP_MAC_ADMIN)) | 758 | if (!policy_view_capable()) |
755 | return -EPERM; | 759 | return -EPERM; |
756 | return param_get_bool(buffer, kp); | 760 | return param_get_bool(buffer, kp); |
757 | } | 761 | } |
758 | 762 | ||
759 | static int param_set_aauint(const char *val, const struct kernel_param *kp) | 763 | static int param_set_aauint(const char *val, const struct kernel_param *kp) |
760 | { | 764 | { |
761 | if (!capable(CAP_MAC_ADMIN)) | 765 | if (!policy_admin_capable()) |
762 | return -EPERM; | 766 | return -EPERM; |
763 | return param_set_uint(val, kp); | 767 | return param_set_uint(val, kp); |
764 | } | 768 | } |
765 | 769 | ||
766 | static int param_get_aauint(char *buffer, const struct kernel_param *kp) | 770 | static int param_get_aauint(char *buffer, const struct kernel_param *kp) |
767 | { | 771 | { |
768 | if (!capable(CAP_MAC_ADMIN)) | 772 | if (!policy_view_capable()) |
769 | return -EPERM; | 773 | return -EPERM; |
770 | return param_get_uint(buffer, kp); | 774 | return param_get_uint(buffer, kp); |
771 | } | 775 | } |
772 | 776 | ||
773 | static int param_get_audit(char *buffer, struct kernel_param *kp) | 777 | static int param_get_audit(char *buffer, struct kernel_param *kp) |
774 | { | 778 | { |
775 | if (!capable(CAP_MAC_ADMIN)) | 779 | if (!policy_view_capable()) |
776 | return -EPERM; | 780 | return -EPERM; |
777 | 781 | ||
778 | if (!apparmor_enabled) | 782 | if (!apparmor_enabled) |
@@ -784,7 +788,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp) | |||
784 | static int param_set_audit(const char *val, struct kernel_param *kp) | 788 | static int param_set_audit(const char *val, struct kernel_param *kp) |
785 | { | 789 | { |
786 | int i; | 790 | int i; |
787 | if (!capable(CAP_MAC_ADMIN)) | 791 | if (!policy_admin_capable()) |
788 | return -EPERM; | 792 | return -EPERM; |
789 | 793 | ||
790 | if (!apparmor_enabled) | 794 | if (!apparmor_enabled) |
@@ -805,7 +809,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp) | |||
805 | 809 | ||
806 | static int param_get_mode(char *buffer, struct kernel_param *kp) | 810 | static int param_get_mode(char *buffer, struct kernel_param *kp) |
807 | { | 811 | { |
808 | if (!capable(CAP_MAC_ADMIN)) | 812 | if (!policy_admin_capable()) |
809 | return -EPERM; | 813 | return -EPERM; |
810 | 814 | ||
811 | if (!apparmor_enabled) | 815 | if (!apparmor_enabled) |
@@ -817,7 +821,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp) | |||
817 | static int param_set_mode(const char *val, struct kernel_param *kp) | 821 | static int param_set_mode(const char *val, struct kernel_param *kp) |
818 | { | 822 | { |
819 | int i; | 823 | int i; |
820 | if (!capable(CAP_MAC_ADMIN)) | 824 | if (!policy_admin_capable()) |
821 | return -EPERM; | 825 | return -EPERM; |
822 | 826 | ||
823 | if (!apparmor_enabled) | 827 | if (!apparmor_enabled) |
diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 727eb4200d5c..3f900fcca8fb 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c | |||
@@ -47,6 +47,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize) | |||
47 | * it every time we use td_id as an index | 47 | * it every time we use td_id as an index |
48 | */ | 48 | */ |
49 | th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1; | 49 | th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1; |
50 | if (th.td_id > YYTD_ID_MAX) | ||
51 | goto out; | ||
50 | th.td_flags = be16_to_cpu(*(u16 *) (blob + 2)); | 52 | th.td_flags = be16_to_cpu(*(u16 *) (blob + 2)); |
51 | th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8)); | 53 | th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8)); |
52 | blob += sizeof(struct table_header); | 54 | blob += sizeof(struct table_header); |
@@ -61,7 +63,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize) | |||
61 | 63 | ||
62 | table = kvzalloc(tsize); | 64 | table = kvzalloc(tsize); |
63 | if (table) { | 65 | if (table) { |
64 | *table = th; | 66 | table->td_id = th.td_id; |
67 | table->td_flags = th.td_flags; | ||
68 | table->td_lolen = th.td_lolen; | ||
65 | if (th.td_flags == YYTD_DATA8) | 69 | if (th.td_flags == YYTD_DATA8) |
66 | UNPACK_ARRAY(table->td_data, blob, th.td_lolen, | 70 | UNPACK_ARRAY(table->td_data, blob, th.td_lolen, |
67 | u8, byte_to_byte); | 71 | u8, byte_to_byte); |
@@ -73,14 +77,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize) | |||
73 | u32, be32_to_cpu); | 77 | u32, be32_to_cpu); |
74 | else | 78 | else |
75 | goto fail; | 79 | goto fail; |
80 | /* if table was vmalloced make sure the page tables are synced | ||
81 | * before it is used, as it goes live to all cpus. | ||
82 | */ | ||
83 | if (is_vmalloc_addr(table)) | ||
84 | vm_unmap_aliases(); | ||
76 | } | 85 | } |
77 | 86 | ||
78 | out: | 87 | out: |
79 | /* if table was vmalloced make sure the page tables are synced | ||
80 | * before it is used, as it goes live to all cpus. | ||
81 | */ | ||
82 | if (is_vmalloc_addr(table)) | ||
83 | vm_unmap_aliases(); | ||
84 | return table; | 88 | return table; |
85 | fail: | 89 | fail: |
86 | kvfree(table); | 90 | kvfree(table); |
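unpack_table() now rejects a table id above YYTD_ID_MAX before the id is ever used as an index, copies only the three header fields it actually unpacked (so uninitialized bytes from the on-stack header cannot end up in the allocated table), and only syncs page tables when a table was really allocated. A tiny sketch of the first of those checks: validate an untrusted big-endian id before it indexes anything (the limit and layout are stand-ins, not the real DFA format).

/* Sketch: validate an untrusted big-endian id before using it as an index. */
#include <arpa/inet.h>	/* ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ID_MAX 8

static int unpack_id(const unsigned char *blob, size_t bsize, unsigned *id)
{
	uint16_t raw;

	if (bsize < sizeof(raw))
		return -1;
	memcpy(&raw, blob, sizeof(raw));
	*id = ntohs(raw) - 1;		/* ids are stored 1-based */
	if (*id > ID_MAX)		/* reject before it indexes anything */
		return -1;
	return 0;
}

int main(void)
{
	const unsigned char good[] = { 0x00, 0x03 };
	const unsigned char bad[]  = { 0x00, 0x7f };
	unsigned id;

	printf("good: %d\n", unpack_id(good, sizeof(good), &id));	/* 0 */
	printf("bad:  %d\n", unpack_id(bad, sizeof(bad), &id));		/* -1 */
	return 0;
}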
diff --git a/security/apparmor/path.c b/security/apparmor/path.c index edddc026406b..a8fc7d08c144 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include "include/path.h" | 25 | #include "include/path.h" |
26 | #include "include/policy.h" | 26 | #include "include/policy.h" |
27 | 27 | ||
28 | |||
29 | /* modified from dcache.c */ | 28 | /* modified from dcache.c */ |
30 | static int prepend(char **buffer, int buflen, const char *str, int namelen) | 29 | static int prepend(char **buffer, int buflen, const char *str, int namelen) |
31 | { | 30 | { |
@@ -39,6 +38,38 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen) | |||
39 | 38 | ||
40 | #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT) | 39 | #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT) |
41 | 40 | ||
41 | /* If the path is not connected to the expected root, | ||
42 | * check if it is a sysctl and handle specially else remove any | ||
43 | * leading / that __d_path may have returned. | ||
44 | * Unless | ||
45 | * specifically directed to connect the path, | ||
46 | * OR | ||
47 | * if in a chroot and doing chroot relative paths and the path | ||
48 | * resolves to the namespace root (would be connected outside | ||
49 | * of chroot) and specifically directed to connect paths to | ||
50 | * namespace root. | ||
51 | */ | ||
52 | static int disconnect(const struct path *path, char *buf, char **name, | ||
53 | int flags) | ||
54 | { | ||
55 | int error = 0; | ||
56 | |||
57 | if (!(flags & PATH_CONNECT_PATH) && | ||
58 | !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && | ||
59 | our_mnt(path->mnt))) { | ||
60 | /* disconnected path, don't return pathname starting | ||
61 | * with '/' | ||
62 | */ | ||
63 | error = -EACCES; | ||
64 | if (**name == '/') | ||
65 | *name = *name + 1; | ||
66 | } else if (**name != '/') | ||
67 | /* CONNECT_PATH with missing root */ | ||
68 | error = prepend(name, *name - buf, "/", 1); | ||
69 | |||
70 | return error; | ||
71 | } | ||
72 | |||
42 | /** | 73 | /** |
43 | * d_namespace_path - lookup a name associated with a given path | 74 | * d_namespace_path - lookup a name associated with a given path |
44 | * @path: path to lookup (NOT NULL) | 75 | * @path: path to lookup (NOT NULL) |
@@ -74,7 +105,8 @@ static int d_namespace_path(const struct path *path, char *buf, int buflen, | |||
74 | * control instead of hard coded /proc | 105 | * control instead of hard coded /proc |
75 | */ | 106 | */ |
76 | return prepend(name, *name - buf, "/proc", 5); | 107 | return prepend(name, *name - buf, "/proc", 5); |
77 | } | 108 | } else |
109 | return disconnect(path, buf, name, flags); | ||
78 | return 0; | 110 | return 0; |
79 | } | 111 | } |
80 | 112 | ||
@@ -120,29 +152,8 @@ static int d_namespace_path(const struct path *path, char *buf, int buflen, | |||
120 | goto out; | 152 | goto out; |
121 | } | 153 | } |
122 | 154 | ||
123 | /* If the path is not connected to the expected root, | 155 | if (!connected) |
124 | * check if it is a sysctl and handle specially else remove any | 156 | error = disconnect(path, buf, name, flags); |
125 | * leading / that __d_path may have returned. | ||
126 | * Unless | ||
127 | * specifically directed to connect the path, | ||
128 | * OR | ||
129 | * if in a chroot and doing chroot relative paths and the path | ||
130 | * resolves to the namespace root (would be connected outside | ||
131 | * of chroot) and specifically directed to connect paths to | ||
132 | * namespace root. | ||
133 | */ | ||
134 | if (!connected) { | ||
135 | if (!(flags & PATH_CONNECT_PATH) && | ||
136 | !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && | ||
137 | our_mnt(path->mnt))) { | ||
138 | /* disconnected path, don't return pathname starting | ||
139 | * with '/' | ||
140 | */ | ||
141 | error = -EACCES; | ||
142 | if (*res == '/') | ||
143 | *name = res + 1; | ||
144 | } | ||
145 | } | ||
146 | 157 | ||
147 | out: | 158 | out: |
148 | return error; | 159 | return error; |
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 705c2879d3a9..179e68d7dc5f 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c | |||
@@ -766,7 +766,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name) | |||
766 | struct aa_profile *profile; | 766 | struct aa_profile *profile; |
767 | 767 | ||
768 | rcu_read_lock(); | 768 | rcu_read_lock(); |
769 | profile = aa_get_profile(__find_child(&parent->base.profiles, name)); | 769 | do { |
770 | profile = __find_child(&parent->base.profiles, name); | ||
771 | } while (profile && !aa_get_profile_not0(profile)); | ||
770 | rcu_read_unlock(); | 772 | rcu_read_unlock(); |
771 | 773 | ||
772 | /* refcount released by caller */ | 774 | /* refcount released by caller */ |
@@ -916,6 +918,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info, | |||
916 | &sa, NULL); | 918 | &sa, NULL); |
917 | } | 919 | } |
918 | 920 | ||
921 | bool policy_view_capable(void) | ||
922 | { | ||
923 | struct user_namespace *user_ns = current_user_ns(); | ||
924 | bool response = false; | ||
925 | |||
926 | if (ns_capable(user_ns, CAP_MAC_ADMIN)) | ||
927 | response = true; | ||
928 | |||
929 | return response; | ||
930 | } | ||
931 | |||
932 | bool policy_admin_capable(void) | ||
933 | { | ||
934 | return policy_view_capable() && !aa_g_lock_policy; | ||
935 | } | ||
936 | |||
919 | /** | 937 | /** |
920 | * aa_may_manage_policy - can the current task manage policy | 938 | * aa_may_manage_policy - can the current task manage policy |
921 | * @op: the policy manipulation operation being done | 939 | * @op: the policy manipulation operation being done |
@@ -930,7 +948,7 @@ bool aa_may_manage_policy(int op) | |||
930 | return 0; | 948 | return 0; |
931 | } | 949 | } |
932 | 950 | ||
933 | if (!capable(CAP_MAC_ADMIN)) { | 951 | if (!policy_admin_capable()) { |
934 | audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES); | 952 | audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES); |
935 | return 0; | 953 | return 0; |
936 | } | 954 | } |
@@ -1067,7 +1085,7 @@ static int __lookup_replace(struct aa_namespace *ns, const char *hname, | |||
1067 | */ | 1085 | */ |
1068 | ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) | 1086 | ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) |
1069 | { | 1087 | { |
1070 | const char *ns_name, *name = NULL, *info = NULL; | 1088 | const char *ns_name, *info = NULL; |
1071 | struct aa_namespace *ns = NULL; | 1089 | struct aa_namespace *ns = NULL; |
1072 | struct aa_load_ent *ent, *tmp; | 1090 | struct aa_load_ent *ent, *tmp; |
1073 | int op = OP_PROF_REPL; | 1091 | int op = OP_PROF_REPL; |
@@ -1082,18 +1100,15 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) | |||
1082 | /* released below */ | 1100 | /* released below */ |
1083 | ns = aa_prepare_namespace(ns_name); | 1101 | ns = aa_prepare_namespace(ns_name); |
1084 | if (!ns) { | 1102 | if (!ns) { |
1085 | info = "failed to prepare namespace"; | 1103 | error = audit_policy(op, GFP_KERNEL, ns_name, |
1086 | error = -ENOMEM; | 1104 | "failed to prepare namespace", -ENOMEM); |
1087 | name = ns_name; | 1105 | goto free; |
1088 | goto fail; | ||
1089 | } | 1106 | } |
1090 | 1107 | ||
1091 | mutex_lock(&ns->lock); | 1108 | mutex_lock(&ns->lock); |
1092 | /* setup parent and ns info */ | 1109 | /* setup parent and ns info */ |
1093 | list_for_each_entry(ent, &lh, list) { | 1110 | list_for_each_entry(ent, &lh, list) { |
1094 | struct aa_policy *policy; | 1111 | struct aa_policy *policy; |
1095 | |||
1096 | name = ent->new->base.hname; | ||
1097 | error = __lookup_replace(ns, ent->new->base.hname, noreplace, | 1112 | error = __lookup_replace(ns, ent->new->base.hname, noreplace, |
1098 | &ent->old, &info); | 1113 | &ent->old, &info); |
1099 | if (error) | 1114 | if (error) |
@@ -1121,7 +1136,6 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) | |||
1121 | if (!p) { | 1136 | if (!p) { |
1122 | error = -ENOENT; | 1137 | error = -ENOENT; |
1123 | info = "parent does not exist"; | 1138 | info = "parent does not exist"; |
1124 | name = ent->new->base.hname; | ||
1125 | goto fail_lock; | 1139 | goto fail_lock; |
1126 | } | 1140 | } |
1127 | rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); | 1141 | rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); |
@@ -1163,7 +1177,7 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) | |||
1163 | list_del_init(&ent->list); | 1177 | list_del_init(&ent->list); |
1164 | op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL; | 1178 | op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL; |
1165 | 1179 | ||
1166 | audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error); | 1180 | audit_policy(op, GFP_ATOMIC, ent->new->base.hname, NULL, error); |
1167 | 1181 | ||
1168 | if (ent->old) { | 1182 | if (ent->old) { |
1169 | __replace_profile(ent->old, ent->new, 1); | 1183 | __replace_profile(ent->old, ent->new, 1); |
@@ -1187,14 +1201,14 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) | |||
1187 | /* parent replaced in this atomic set? */ | 1201 | /* parent replaced in this atomic set? */ |
1188 | if (newest != parent) { | 1202 | if (newest != parent) { |
1189 | aa_get_profile(newest); | 1203 | aa_get_profile(newest); |
1190 | aa_put_profile(parent); | ||
1191 | rcu_assign_pointer(ent->new->parent, newest); | 1204 | rcu_assign_pointer(ent->new->parent, newest); |
1192 | } else | 1205 | aa_put_profile(parent); |
1193 | aa_put_profile(newest); | 1206 | } |
1194 | /* aafs interface uses replacedby */ | 1207 | /* aafs interface uses replacedby */ |
1195 | rcu_assign_pointer(ent->new->replacedby->profile, | 1208 | rcu_assign_pointer(ent->new->replacedby->profile, |
1196 | aa_get_profile(ent->new)); | 1209 | aa_get_profile(ent->new)); |
1197 | __list_add_profile(&parent->base.profiles, ent->new); | 1210 | __list_add_profile(&newest->base.profiles, ent->new); |
1211 | aa_put_profile(newest); | ||
1198 | } else { | 1212 | } else { |
1199 | /* aafs interface uses replacedby */ | 1213 | /* aafs interface uses replacedby */ |
1200 | rcu_assign_pointer(ent->new->replacedby->profile, | 1214 | rcu_assign_pointer(ent->new->replacedby->profile, |
@@ -1214,9 +1228,22 @@ out: | |||
1214 | 1228 | ||
1215 | fail_lock: | 1229 | fail_lock: |
1216 | mutex_unlock(&ns->lock); | 1230 | mutex_unlock(&ns->lock); |
1217 | fail: | ||
1218 | error = audit_policy(op, GFP_KERNEL, name, info, error); | ||
1219 | 1231 | ||
1232 | /* audit cause of failure */ | ||
1233 | op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; | ||
1234 | audit_policy(op, GFP_KERNEL, ent->new->base.hname, info, error); | ||
1235 | /* audit that the rest of the profiles in the atomic set failed too */ | ||
1236 | info = "valid profile in failed atomic policy load"; | ||
1237 | list_for_each_entry(tmp, &lh, list) { | ||
1238 | if (tmp == ent) { | ||
1239 | info = "unchecked profile in failed atomic policy load"; | ||
1240 | /* skip entry that caused failure */ | ||
1241 | continue; | ||
1242 | } | ||
1243 | op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; | ||
1244 | audit_policy(op, GFP_KERNEL, tmp->new->base.hname, info, error); | ||
1245 | } | ||
1246 | free: | ||
1220 | list_for_each_entry_safe(ent, tmp, &lh, list) { | 1247 | list_for_each_entry_safe(ent, tmp, &lh, list) { |
1221 | list_del_init(&ent->list); | 1248 | list_del_init(&ent->list); |
1222 | aa_load_ent_free(ent); | 1249 | aa_load_ent_free(ent); |
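aa_find_child() used to look the child up and then take a plain reference, which races with the profile's last reference being dropped between the lookup and the get; the fix retries the lookup under RCU until a get-only-if-still-nonzero succeeds. A userspace sketch of that try-get primitive with C11 atomics follows; the kernel uses its own kref/RCU machinery, this is only the idea. Note that in the kernel loop a failed try-get also redoes the list lookup, since the dying child may have been replaced.

/* Sketch of "take a reference only if the object is still live",
 * the primitive behind the aa_get_profile_not0() retry loop.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refs; };

static bool get_not_zero(struct obj *o)
{
	int old = atomic_load(&o->refs);

	while (old != 0) {
		/* Only bump the count if nobody dropped it to zero meanwhile. */
		if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; loop and retry. */
	}
	return false;	/* object is already on its way to being freed */
}

int main(void)
{
	struct obj live = { .refs = 1 }, dying = { .refs = 0 };

	printf("live:  %s\n", get_not_zero(&live) ? "got ref" : "missed");
	printf("dying: %s\n", get_not_zero(&dying) ? "got ref" : "missed");
	return 0;
}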
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index a689f10930b5..138120698f83 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c | |||
@@ -583,6 +583,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) | |||
583 | error = PTR_ERR(profile->policy.dfa); | 583 | error = PTR_ERR(profile->policy.dfa); |
584 | profile->policy.dfa = NULL; | 584 | profile->policy.dfa = NULL; |
585 | goto fail; | 585 | goto fail; |
586 | } else if (!profile->policy.dfa) { | ||
587 | error = -EPROTO; | ||
588 | goto fail; | ||
586 | } | 589 | } |
587 | if (!unpack_u32(e, &profile->policy.start[0], "start")) | 590 | if (!unpack_u32(e, &profile->policy.start[0], "start")) |
588 | /* default start state */ | 591 | /* default start state */ |
@@ -676,7 +679,7 @@ static bool verify_xindex(int xindex, int table_size) | |||
676 | int index, xtype; | 679 | int index, xtype; |
677 | xtype = xindex & AA_X_TYPE_MASK; | 680 | xtype = xindex & AA_X_TYPE_MASK; |
678 | index = xindex & AA_X_INDEX_MASK; | 681 | index = xindex & AA_X_INDEX_MASK; |
679 | if (xtype == AA_X_TABLE && index > table_size) | 682 | if (xtype == AA_X_TABLE && index >= table_size) |
680 | return 0; | 683 | return 0; |
681 | return 1; | 684 | return 1; |
682 | } | 685 | } |
@@ -776,7 +779,7 @@ int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns) | |||
776 | goto fail_profile; | 779 | goto fail_profile; |
777 | 780 | ||
778 | error = aa_calc_profile_hash(profile, e.version, start, | 781 | error = aa_calc_profile_hash(profile, e.version, start, |
779 | e.pos - start); | 782 | e.pos - start); |
780 | if (error) | 783 | if (error) |
781 | goto fail_profile; | 784 | goto fail_profile; |
782 | 785 | ||
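Two hardening tweaks here: unpacking now fails with -EPROTO when no DFA came out of the policy blob instead of carrying on with a NULL pointer, and verify_xindex() treats an index equal to the transition table size as out of bounds, since valid slots are 0 through table_size - 1. The boundary is easy to get backwards, so the sketch below just spells it out (the mask values are illustrative, not AppArmor's).

/* Boundary of a table-index check: size N means valid indices 0..N-1. */
#include <stdbool.h>
#include <stdio.h>

#define X_TYPE_MASK  0x0c00	/* illustrative masks */
#define X_INDEX_MASK 0x03ff
#define X_TABLE      0x0400

static bool xindex_ok(int xindex, int table_size)
{
	int xtype = xindex & X_TYPE_MASK;
	int index = xindex & X_INDEX_MASK;

	/* ">= table_size" rejects the off-by-one that "> table_size" let through */
	if (xtype == X_TABLE && index >= table_size)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", xindex_ok(X_TABLE | 3, 4));	/* 1: last valid slot */
	printf("%d\n", xindex_ok(X_TABLE | 4, 4));	/* 0: one past the end */
	return 0;
}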
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index 748bf0ca6c9f..67a6072ead4b 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c | |||
@@ -101,9 +101,11 @@ int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task, | |||
101 | /* TODO: extend resource control to handle other (non current) | 101 | /* TODO: extend resource control to handle other (non current) |
102 | * profiles. AppArmor rules currently have the implicit assumption | 102 | * profiles. AppArmor rules currently have the implicit assumption |
103 | * that the task is setting the resource of a task confined with | 103 | * that the task is setting the resource of a task confined with |
104 | * the same profile. | 104 | * the same profile or that the task setting the resource of another |
105 | * task has CAP_SYS_RESOURCE. | ||
105 | */ | 106 | */ |
106 | if (profile != task_profile || | 107 | if ((profile != task_profile && |
108 | aa_capable(profile, CAP_SYS_RESOURCE, 1)) || | ||
107 | (profile->rlimits.mask & (1 << resource) && | 109 | (profile->rlimits.mask & (1 << resource) && |
108 | new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)) | 110 | new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)) |
109 | error = -EACCES; | 111 | error = -EACCES; |
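With this change, setting the rlimits of a task confined by a different profile is no longer refused outright; it is permitted when the calling profile grants CAP_SYS_RESOURCE, matching what setting another task's limits requires anyway. For reference, the operation being mediated looks like this from userspace (the target pid and limit values are placeholders):

/* Raising limits on another pid needs CAP_SYS_RESOURCE and, under
 * AppArmor after this change, a profile that grants it.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/resource.h>

int main(int argc, char **argv)
{
	pid_t target = argc > 1 ? (pid_t)atoi(argv[1]) : 0;	/* 0 = self */
	struct rlimit new_lim = { .rlim_cur = 4096, .rlim_max = 4096 };
	struct rlimit old_lim;

	if (prlimit(target, RLIMIT_NOFILE, &new_lim, &old_lim)) {
		perror("prlimit");	/* EPERM/EACCES when policy denies it */
		return 1;
	}
	printf("RLIMIT_NOFILE: old max %llu -> new max %llu\n",
	       (unsigned long long)old_lim.rlim_max,
	       (unsigned long long)new_lim.rlim_max);
	return 0;
}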
diff --git a/security/integrity/iint.c b/security/integrity/iint.c index 345b75997e4c..c710d22042f9 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c | |||
@@ -79,6 +79,7 @@ static void iint_free(struct integrity_iint_cache *iint) | |||
79 | iint->ima_bprm_status = INTEGRITY_UNKNOWN; | 79 | iint->ima_bprm_status = INTEGRITY_UNKNOWN; |
80 | iint->ima_read_status = INTEGRITY_UNKNOWN; | 80 | iint->ima_read_status = INTEGRITY_UNKNOWN; |
81 | iint->evm_status = INTEGRITY_UNKNOWN; | 81 | iint->evm_status = INTEGRITY_UNKNOWN; |
82 | iint->measured_pcrs = 0; | ||
82 | kmem_cache_free(iint_cache, iint); | 83 | kmem_cache_free(iint_cache, iint); |
83 | } | 84 | } |
84 | 85 | ||
@@ -159,6 +160,7 @@ static void init_once(void *foo) | |||
159 | iint->ima_bprm_status = INTEGRITY_UNKNOWN; | 160 | iint->ima_bprm_status = INTEGRITY_UNKNOWN; |
160 | iint->ima_read_status = INTEGRITY_UNKNOWN; | 161 | iint->ima_read_status = INTEGRITY_UNKNOWN; |
161 | iint->evm_status = INTEGRITY_UNKNOWN; | 162 | iint->evm_status = INTEGRITY_UNKNOWN; |
163 | iint->measured_pcrs = 0; | ||
162 | } | 164 | } |
163 | 165 | ||
164 | static int __init integrity_iintcache_init(void) | 166 | static int __init integrity_iintcache_init(void) |
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index d3a939bf2781..db25f54a04fe 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h | |||
@@ -88,6 +88,7 @@ struct ima_template_desc { | |||
88 | }; | 88 | }; |
89 | 89 | ||
90 | struct ima_template_entry { | 90 | struct ima_template_entry { |
91 | int pcr; | ||
91 | u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ | 92 | u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ |
92 | struct ima_template_desc *template_desc; /* template descriptor */ | 93 | struct ima_template_desc *template_desc; /* template descriptor */ |
93 | u32 template_data_len; | 94 | u32 template_data_len; |
@@ -154,7 +155,8 @@ enum ima_hooks { | |||
154 | }; | 155 | }; |
155 | 156 | ||
156 | /* LIM API function definitions */ | 157 | /* LIM API function definitions */ |
157 | int ima_get_action(struct inode *inode, int mask, enum ima_hooks func); | 158 | int ima_get_action(struct inode *inode, int mask, |
159 | enum ima_hooks func, int *pcr); | ||
158 | int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func); | 160 | int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func); |
159 | int ima_collect_measurement(struct integrity_iint_cache *iint, | 161 | int ima_collect_measurement(struct integrity_iint_cache *iint, |
160 | struct file *file, void *buf, loff_t size, | 162 | struct file *file, void *buf, loff_t size, |
@@ -162,19 +164,20 @@ int ima_collect_measurement(struct integrity_iint_cache *iint, | |||
162 | void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, | 164 | void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, |
163 | const unsigned char *filename, | 165 | const unsigned char *filename, |
164 | struct evm_ima_xattr_data *xattr_value, | 166 | struct evm_ima_xattr_data *xattr_value, |
165 | int xattr_len); | 167 | int xattr_len, int pcr); |
166 | void ima_audit_measurement(struct integrity_iint_cache *iint, | 168 | void ima_audit_measurement(struct integrity_iint_cache *iint, |
167 | const unsigned char *filename); | 169 | const unsigned char *filename); |
168 | int ima_alloc_init_template(struct ima_event_data *event_data, | 170 | int ima_alloc_init_template(struct ima_event_data *event_data, |
169 | struct ima_template_entry **entry); | 171 | struct ima_template_entry **entry); |
170 | int ima_store_template(struct ima_template_entry *entry, int violation, | 172 | int ima_store_template(struct ima_template_entry *entry, int violation, |
171 | struct inode *inode, const unsigned char *filename); | 173 | struct inode *inode, |
174 | const unsigned char *filename, int pcr); | ||
172 | void ima_free_template_entry(struct ima_template_entry *entry); | 175 | void ima_free_template_entry(struct ima_template_entry *entry); |
173 | const char *ima_d_path(const struct path *path, char **pathbuf); | 176 | const char *ima_d_path(const struct path *path, char **pathbuf); |
174 | 177 | ||
175 | /* IMA policy related functions */ | 178 | /* IMA policy related functions */ |
176 | int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, | 179 | int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, |
177 | int flags); | 180 | int flags, int *pcr); |
178 | void ima_init_policy(void); | 181 | void ima_init_policy(void); |
179 | void ima_update_policy(void); | 182 | void ima_update_policy(void); |
180 | void ima_update_policy_flag(void); | 183 | void ima_update_policy_flag(void); |
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index 5a2218fe877a..9df26a2b75ba 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c | |||
@@ -87,7 +87,7 @@ out: | |||
87 | */ | 87 | */ |
88 | int ima_store_template(struct ima_template_entry *entry, | 88 | int ima_store_template(struct ima_template_entry *entry, |
89 | int violation, struct inode *inode, | 89 | int violation, struct inode *inode, |
90 | const unsigned char *filename) | 90 | const unsigned char *filename, int pcr) |
91 | { | 91 | { |
92 | static const char op[] = "add_template_measure"; | 92 | static const char op[] = "add_template_measure"; |
93 | static const char audit_cause[] = "hashing_error"; | 93 | static const char audit_cause[] = "hashing_error"; |
@@ -114,6 +114,7 @@ int ima_store_template(struct ima_template_entry *entry, | |||
114 | } | 114 | } |
115 | memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); | 115 | memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); |
116 | } | 116 | } |
117 | entry->pcr = pcr; | ||
117 | result = ima_add_template_entry(entry, violation, op, inode, filename); | 118 | result = ima_add_template_entry(entry, violation, op, inode, filename); |
118 | return result; | 119 | return result; |
119 | } | 120 | } |
@@ -144,7 +145,8 @@ void ima_add_violation(struct file *file, const unsigned char *filename, | |||
144 | result = -ENOMEM; | 145 | result = -ENOMEM; |
145 | goto err_out; | 146 | goto err_out; |
146 | } | 147 | } |
147 | result = ima_store_template(entry, violation, inode, filename); | 148 | result = ima_store_template(entry, violation, inode, |
149 | filename, CONFIG_IMA_MEASURE_PCR_IDX); | ||
148 | if (result < 0) | 150 | if (result < 0) |
149 | ima_free_template_entry(entry); | 151 | ima_free_template_entry(entry); |
150 | err_out: | 152 | err_out: |
@@ -157,6 +159,7 @@ err_out: | |||
157 | * @inode: pointer to inode to measure | 159 | * @inode: pointer to inode to measure |
158 | * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) | 160 | * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) |
159 | * @func: caller identifier | 161 | * @func: caller identifier |
162 | * @pcr: pointer filled in if matched measure policy sets pcr= | ||
160 | * | 163 | * |
161 | * The policy is defined in terms of keypairs: | 164 | * The policy is defined in terms of keypairs: |
162 | * subj=, obj=, type=, func=, mask=, fsmagic= | 165 | * subj=, obj=, type=, func=, mask=, fsmagic= |
@@ -168,13 +171,13 @@ err_out: | |||
168 | * Returns IMA_MEASURE, IMA_APPRAISE mask. | 171 | * Returns IMA_MEASURE, IMA_APPRAISE mask. |
169 | * | 172 | * |
170 | */ | 173 | */ |
171 | int ima_get_action(struct inode *inode, int mask, enum ima_hooks func) | 174 | int ima_get_action(struct inode *inode, int mask, enum ima_hooks func, int *pcr) |
172 | { | 175 | { |
173 | int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE; | 176 | int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE; |
174 | 177 | ||
175 | flags &= ima_policy_flag; | 178 | flags &= ima_policy_flag; |
176 | 179 | ||
177 | return ima_match_policy(inode, func, mask, flags); | 180 | return ima_match_policy(inode, func, mask, flags, pcr); |
178 | } | 181 | } |
179 | 182 | ||
180 | /* | 183 | /* |
@@ -252,7 +255,7 @@ out: | |||
252 | void ima_store_measurement(struct integrity_iint_cache *iint, | 255 | void ima_store_measurement(struct integrity_iint_cache *iint, |
253 | struct file *file, const unsigned char *filename, | 256 | struct file *file, const unsigned char *filename, |
254 | struct evm_ima_xattr_data *xattr_value, | 257 | struct evm_ima_xattr_data *xattr_value, |
255 | int xattr_len) | 258 | int xattr_len, int pcr) |
256 | { | 259 | { |
257 | static const char op[] = "add_template_measure"; | 260 | static const char op[] = "add_template_measure"; |
258 | static const char audit_cause[] = "ENOMEM"; | 261 | static const char audit_cause[] = "ENOMEM"; |
@@ -263,7 +266,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint, | |||
263 | xattr_len, NULL}; | 266 | xattr_len, NULL}; |
264 | int violation = 0; | 267 | int violation = 0; |
265 | 268 | ||
266 | if (iint->flags & IMA_MEASURED) | 269 | if (iint->measured_pcrs & (0x1 << pcr)) |
267 | return; | 270 | return; |
268 | 271 | ||
269 | result = ima_alloc_init_template(&event_data, &entry); | 272 | result = ima_alloc_init_template(&event_data, &entry); |
@@ -273,9 +276,11 @@ void ima_store_measurement(struct integrity_iint_cache *iint, | |||
273 | return; | 276 | return; |
274 | } | 277 | } |
275 | 278 | ||
276 | result = ima_store_template(entry, violation, inode, filename); | 279 | result = ima_store_template(entry, violation, inode, filename, pcr); |
277 | if (!result || result == -EEXIST) | 280 | if (!result || result == -EEXIST) { |
278 | iint->flags |= IMA_MEASURED; | 281 | iint->flags |= IMA_MEASURED; |
282 | iint->measured_pcrs |= (0x1 << pcr); | ||
283 | } | ||
279 | if (result < 0) | 284 | if (result < 0) |
280 | ima_free_template_entry(entry); | 285 | ima_free_template_entry(entry); |
281 | } | 286 | } |
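Once a rule can pick its own PCR, a single IMA_MEASURED flag is not enough, so the iint grows a measured_pcrs bitmask and a file is re-measured only for PCRs it has not been extended into yet. The bookkeeping is plain bit arithmetic, as in this sketch (the exact field type is not visible in these hunks, so the unsigned long here is an assumption).

/* Sketch of the measured_pcrs bookkeeping: one bit per PCR already extended. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long measured_pcrs;	/* width assumed; one bit per PCR */

static bool pcr_already_measured(int pcr)
{
	return measured_pcrs & (1UL << pcr);
}

static void mark_measured(int pcr)
{
	measured_pcrs |= 1UL << pcr;
}

int main(void)
{
	int pcr = 11;	/* e.g. a policy rule carrying pcr=11 */

	if (!pcr_already_measured(pcr)) {
		/* ...store the measurement and extend the TPM PCR here... */
		mark_measured(pcr);
		puts("measured into PCR 11");
	}
	if (pcr_already_measured(pcr))
		puts("second access skips the measurement");
	return 0;
}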
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 1bcbc12e03d9..4b9b4a4e1b89 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c | |||
@@ -41,7 +41,7 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func) | |||
41 | if (!ima_appraise) | 41 | if (!ima_appraise) |
42 | return 0; | 42 | return 0; |
43 | 43 | ||
44 | return ima_match_policy(inode, func, mask, IMA_APPRAISE); | 44 | return ima_match_policy(inode, func, mask, IMA_APPRAISE, NULL); |
45 | } | 45 | } |
46 | 46 | ||
47 | static int ima_fix_xattr(struct dentry *dentry, | 47 | static int ima_fix_xattr(struct dentry *dentry, |
@@ -370,6 +370,7 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig) | |||
370 | return; | 370 | return; |
371 | 371 | ||
372 | iint->flags &= ~IMA_DONE_MASK; | 372 | iint->flags &= ~IMA_DONE_MASK; |
373 | iint->measured_pcrs = 0; | ||
373 | if (digsig) | 374 | if (digsig) |
374 | iint->flags |= IMA_DIGSIG; | 375 | iint->flags |= IMA_DIGSIG; |
375 | return; | 376 | return; |
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index 60d011aaec38..c07a3844ea0a 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c | |||
@@ -123,7 +123,6 @@ static int ima_measurements_show(struct seq_file *m, void *v) | |||
123 | struct ima_template_entry *e; | 123 | struct ima_template_entry *e; |
124 | char *template_name; | 124 | char *template_name; |
125 | int namelen; | 125 | int namelen; |
126 | u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX; | ||
127 | bool is_ima_template = false; | 126 | bool is_ima_template = false; |
128 | int i; | 127 | int i; |
129 | 128 | ||
@@ -137,10 +136,10 @@ static int ima_measurements_show(struct seq_file *m, void *v) | |||
137 | 136 | ||
138 | /* | 137 | /* |
139 | * 1st: PCRIndex | 138 | * 1st: PCRIndex |
140 | * PCR used is always the same (config option) in | 139 | * PCR used defaults to the same (config option) in |
141 | * little-endian format | 140 | * little-endian format, unless set in policy |
142 | */ | 141 | */ |
143 | ima_putc(m, &pcr, sizeof(pcr)); | 142 | ima_putc(m, &e->pcr, sizeof(e->pcr)); |
144 | 143 | ||
145 | /* 2nd: template digest */ | 144 | /* 2nd: template digest */ |
146 | ima_putc(m, e->digest, TPM_DIGEST_SIZE); | 145 | ima_putc(m, e->digest, TPM_DIGEST_SIZE); |
@@ -219,7 +218,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v) | |||
219 | e->template_desc->name : e->template_desc->fmt; | 218 | e->template_desc->name : e->template_desc->fmt; |
220 | 219 | ||
221 | /* 1st: PCR used (config option) */ | 220 | /* 1st: PCR used (config option) */ |
222 | seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX); | 221 | seq_printf(m, "%2d ", e->pcr); |
223 | 222 | ||
224 | /* 2nd: SHA1 template hash */ | 223 | /* 2nd: SHA1 template hash */ |
225 | ima_print_digest(m, e->digest, TPM_DIGEST_SIZE); | 224 | ima_print_digest(m, e->digest, TPM_DIGEST_SIZE); |
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 5d679a685616..32912bd54ead 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c | |||
@@ -79,7 +79,8 @@ static int __init ima_add_boot_aggregate(void) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | result = ima_store_template(entry, violation, NULL, | 81 | result = ima_store_template(entry, violation, NULL, |
82 | boot_aggregate_name); | 82 | boot_aggregate_name, |
83 | CONFIG_IMA_MEASURE_PCR_IDX); | ||
83 | if (result < 0) { | 84 | if (result < 0) { |
84 | ima_free_template_entry(entry); | 85 | ima_free_template_entry(entry); |
85 | audit_cause = "store_entry"; | 86 | audit_cause = "store_entry"; |
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 68b26c340acd..596ef616ac21 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c | |||
@@ -125,6 +125,7 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint, | |||
125 | if ((iint->version != inode->i_version) || | 125 | if ((iint->version != inode->i_version) || |
126 | (iint->flags & IMA_NEW_FILE)) { | 126 | (iint->flags & IMA_NEW_FILE)) { |
127 | iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); | 127 | iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); |
128 | iint->measured_pcrs = 0; | ||
128 | if (iint->flags & IMA_APPRAISE) | 129 | if (iint->flags & IMA_APPRAISE) |
129 | ima_update_xattr(iint, file); | 130 | ima_update_xattr(iint, file); |
130 | } | 131 | } |
@@ -162,6 +163,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, | |||
162 | char *pathbuf = NULL; | 163 | char *pathbuf = NULL; |
163 | const char *pathname = NULL; | 164 | const char *pathname = NULL; |
164 | int rc = -ENOMEM, action, must_appraise; | 165 | int rc = -ENOMEM, action, must_appraise; |
166 | int pcr = CONFIG_IMA_MEASURE_PCR_IDX; | ||
165 | struct evm_ima_xattr_data *xattr_value = NULL; | 167 | struct evm_ima_xattr_data *xattr_value = NULL; |
166 | int xattr_len = 0; | 168 | int xattr_len = 0; |
167 | bool violation_check; | 169 | bool violation_check; |
@@ -174,7 +176,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, | |||
174 | * bitmask based on the appraise/audit/measurement policy. | 176 | * bitmask based on the appraise/audit/measurement policy. |
175 | * Included is the appraise submask. | 177 | * Included is the appraise submask. |
176 | */ | 178 | */ |
177 | action = ima_get_action(inode, mask, func); | 179 | action = ima_get_action(inode, mask, func, &pcr); |
178 | violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) && | 180 | violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) && |
179 | (ima_policy_flag & IMA_MEASURE)); | 181 | (ima_policy_flag & IMA_MEASURE)); |
180 | if (!action && !violation_check) | 182 | if (!action && !violation_check) |
@@ -209,7 +211,11 @@ static int process_measurement(struct file *file, char *buf, loff_t size, | |||
209 | */ | 211 | */ |
210 | iint->flags |= action; | 212 | iint->flags |= action; |
211 | action &= IMA_DO_MASK; | 213 | action &= IMA_DO_MASK; |
212 | action &= ~((iint->flags & IMA_DONE_MASK) >> 1); | 214 | action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1); |
215 | |||
216 | /* If target pcr is already measured, unset IMA_MEASURE action */ | ||
217 | if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr))) | ||
218 | action ^= IMA_MEASURE; | ||
213 | 219 | ||
214 | /* Nothing to do, just return existing appraised status */ | 220 | /* Nothing to do, just return existing appraised status */ |
215 | if (!action) { | 221 | if (!action) { |
@@ -238,7 +244,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, | |||
238 | 244 | ||
239 | if (action & IMA_MEASURE) | 245 | if (action & IMA_MEASURE) |
240 | ima_store_measurement(iint, file, pathname, | 246 | ima_store_measurement(iint, file, pathname, |
241 | xattr_value, xattr_len); | 247 | xattr_value, xattr_len, pcr); |
242 | if (action & IMA_APPRAISE_SUBMASK) | 248 | if (action & IMA_APPRAISE_SUBMASK) |
243 | rc = ima_appraise_measurement(func, iint, file, pathname, | 249 | rc = ima_appraise_measurement(func, iint, file, pathname, |
244 | xattr_value, xattr_len, opened); | 250 | xattr_value, xattr_len, opened); |
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 0f887a564a29..aed47b777a57 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #define IMA_FSUUID 0x0020 | 32 | #define IMA_FSUUID 0x0020 |
33 | #define IMA_INMASK 0x0040 | 33 | #define IMA_INMASK 0x0040 |
34 | #define IMA_EUID 0x0080 | 34 | #define IMA_EUID 0x0080 |
35 | #define IMA_PCR 0x0100 | ||
35 | 36 | ||
36 | #define UNKNOWN 0 | 37 | #define UNKNOWN 0 |
37 | #define MEASURE 0x0001 /* same as IMA_MEASURE */ | 38 | #define MEASURE 0x0001 /* same as IMA_MEASURE */ |
@@ -40,6 +41,9 @@ | |||
40 | #define DONT_APPRAISE 0x0008 | 41 | #define DONT_APPRAISE 0x0008 |
41 | #define AUDIT 0x0040 | 42 | #define AUDIT 0x0040 |
42 | 43 | ||
44 | #define INVALID_PCR(a) (((a) < 0) || \ | ||
45 | (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8)) | ||
46 | |||
43 | int ima_policy_flag; | 47 | int ima_policy_flag; |
44 | static int temp_ima_appraise; | 48 | static int temp_ima_appraise; |
45 | 49 | ||
@@ -60,6 +64,7 @@ struct ima_rule_entry { | |||
60 | u8 fsuuid[16]; | 64 | u8 fsuuid[16]; |
61 | kuid_t uid; | 65 | kuid_t uid; |
62 | kuid_t fowner; | 66 | kuid_t fowner; |
67 | int pcr; | ||
63 | struct { | 68 | struct { |
64 | void *rule; /* LSM file metadata specific */ | 69 | void *rule; /* LSM file metadata specific */ |
65 | void *args_p; /* audit value */ | 70 | void *args_p; /* audit value */ |
@@ -319,6 +324,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func) | |||
319 | * @inode: pointer to an inode for which the policy decision is being made | 324 | * @inode: pointer to an inode for which the policy decision is being made |
320 | * @func: IMA hook identifier | 325 | * @func: IMA hook identifier |
321 | * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) | 326 | * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) |
327 | * @pcr: set the pcr to extend | ||
322 | * | 328 | * |
323 | * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type) | 329 | * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type) |
324 | * conditions. | 330 | * conditions. |
@@ -328,7 +334,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func) | |||
328 | * than writes so ima_match_policy() is classical RCU candidate. | 334 | * than writes so ima_match_policy() is classical RCU candidate. |
329 | */ | 335 | */ |
330 | int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, | 336 | int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, |
331 | int flags) | 337 | int flags, int *pcr) |
332 | { | 338 | { |
333 | struct ima_rule_entry *entry; | 339 | struct ima_rule_entry *entry; |
334 | int action = 0, actmask = flags | (flags << 1); | 340 | int action = 0, actmask = flags | (flags << 1); |
@@ -353,6 +359,9 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, | |||
353 | else | 359 | else |
354 | actmask &= ~(entry->action | entry->action >> 1); | 360 | actmask &= ~(entry->action | entry->action >> 1); |
355 | 361 | ||
362 | if ((pcr) && (entry->flags & IMA_PCR)) | ||
363 | *pcr = entry->pcr; | ||
364 | |||
356 | if (!actmask) | 365 | if (!actmask) |
357 | break; | 366 | break; |
358 | } | 367 | } |
@@ -478,7 +487,8 @@ enum { | |||
478 | Opt_subj_user, Opt_subj_role, Opt_subj_type, | 487 | Opt_subj_user, Opt_subj_role, Opt_subj_type, |
479 | Opt_func, Opt_mask, Opt_fsmagic, | 488 | Opt_func, Opt_mask, Opt_fsmagic, |
480 | Opt_fsuuid, Opt_uid, Opt_euid, Opt_fowner, | 489 | Opt_fsuuid, Opt_uid, Opt_euid, Opt_fowner, |
481 | Opt_appraise_type, Opt_permit_directio | 490 | Opt_appraise_type, Opt_permit_directio, |
491 | Opt_pcr | ||
482 | }; | 492 | }; |
483 | 493 | ||
484 | static match_table_t policy_tokens = { | 494 | static match_table_t policy_tokens = { |
@@ -502,6 +512,7 @@ static match_table_t policy_tokens = { | |||
502 | {Opt_fowner, "fowner=%s"}, | 512 | {Opt_fowner, "fowner=%s"}, |
503 | {Opt_appraise_type, "appraise_type=%s"}, | 513 | {Opt_appraise_type, "appraise_type=%s"}, |
504 | {Opt_permit_directio, "permit_directio"}, | 514 | {Opt_permit_directio, "permit_directio"}, |
515 | {Opt_pcr, "pcr=%s"}, | ||
505 | {Opt_err, NULL} | 516 | {Opt_err, NULL} |
506 | }; | 517 | }; |
507 | 518 | ||
@@ -774,6 +785,20 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) | |||
774 | case Opt_permit_directio: | 785 | case Opt_permit_directio: |
775 | entry->flags |= IMA_PERMIT_DIRECTIO; | 786 | entry->flags |= IMA_PERMIT_DIRECTIO; |
776 | break; | 787 | break; |
788 | case Opt_pcr: | ||
789 | if (entry->action != MEASURE) { | ||
790 | result = -EINVAL; | ||
791 | break; | ||
792 | } | ||
793 | ima_log_string(ab, "pcr", args[0].from); | ||
794 | |||
795 | result = kstrtoint(args[0].from, 10, &entry->pcr); | ||
796 | if (result || INVALID_PCR(entry->pcr)) | ||
797 | result = -EINVAL; | ||
798 | else | ||
799 | entry->flags |= IMA_PCR; | ||
800 | |||
801 | break; | ||
777 | case Opt_err: | 802 | case Opt_err: |
778 | ima_log_string(ab, "UNKNOWN", p); | 803 | ima_log_string(ab, "UNKNOWN", p); |
779 | result = -EINVAL; | 804 | result = -EINVAL; |
@@ -1011,6 +1036,12 @@ int ima_policy_show(struct seq_file *m, void *v) | |||
1011 | seq_puts(m, " "); | 1036 | seq_puts(m, " "); |
1012 | } | 1037 | } |
1013 | 1038 | ||
1039 | if (entry->flags & IMA_PCR) { | ||
1040 | snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr); | ||
1041 | seq_printf(m, pt(Opt_pcr), tbuf); | ||
1042 | seq_puts(m, " "); | ||
1043 | } | ||
1044 | |||
1014 | if (entry->flags & IMA_FSUUID) { | 1045 | if (entry->flags & IMA_FSUUID) { |
1015 | seq_printf(m, "fsuuid=%pU", entry->fsuuid); | 1046 | seq_printf(m, "fsuuid=%pU", entry->fsuuid); |
1016 | seq_puts(m, " "); | 1047 | seq_puts(m, " "); |
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index 552705d5a78d..32f6ac0f96df 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c | |||
@@ -44,7 +44,8 @@ struct ima_h_table ima_htable = { | |||
44 | static DEFINE_MUTEX(ima_extend_list_mutex); | 44 | static DEFINE_MUTEX(ima_extend_list_mutex); |
45 | 45 | ||
46 | /* lookup up the digest value in the hash table, and return the entry */ | 46 | /* lookup up the digest value in the hash table, and return the entry */ |
47 | static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value) | 47 | static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value, |
48 | int pcr) | ||
48 | { | 49 | { |
49 | struct ima_queue_entry *qe, *ret = NULL; | 50 | struct ima_queue_entry *qe, *ret = NULL; |
50 | unsigned int key; | 51 | unsigned int key; |
@@ -54,7 +55,7 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value) | |||
54 | rcu_read_lock(); | 55 | rcu_read_lock(); |
55 | hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { | 56 | hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { |
56 | rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); | 57 | rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); |
57 | if (rc == 0) { | 58 | if ((rc == 0) && (qe->entry->pcr == pcr)) { |
58 | ret = qe; | 59 | ret = qe; |
59 | break; | 60 | break; |
60 | } | 61 | } |
@@ -89,14 +90,14 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) | |||
89 | return 0; | 90 | return 0; |
90 | } | 91 | } |
91 | 92 | ||
92 | static int ima_pcr_extend(const u8 *hash) | 93 | static int ima_pcr_extend(const u8 *hash, int pcr) |
93 | { | 94 | { |
94 | int result = 0; | 95 | int result = 0; |
95 | 96 | ||
96 | if (!ima_used_chip) | 97 | if (!ima_used_chip) |
97 | return result; | 98 | return result; |
98 | 99 | ||
99 | result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); | 100 | result = tpm_pcr_extend(TPM_ANY_NUM, pcr, hash); |
100 | if (result != 0) | 101 | if (result != 0) |
101 | pr_err("Error Communicating to TPM chip, result: %d\n", result); | 102 | pr_err("Error Communicating to TPM chip, result: %d\n", result); |
102 | return result; | 103 | return result; |
@@ -118,7 +119,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, | |||
118 | mutex_lock(&ima_extend_list_mutex); | 119 | mutex_lock(&ima_extend_list_mutex); |
119 | if (!violation) { | 120 | if (!violation) { |
120 | memcpy(digest, entry->digest, sizeof(digest)); | 121 | memcpy(digest, entry->digest, sizeof(digest)); |
121 | if (ima_lookup_digest_entry(digest)) { | 122 | if (ima_lookup_digest_entry(digest, entry->pcr)) { |
122 | audit_cause = "hash_exists"; | 123 | audit_cause = "hash_exists"; |
123 | result = -EEXIST; | 124 | result = -EEXIST; |
124 | goto out; | 125 | goto out; |
@@ -135,7 +136,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, | |||
135 | if (violation) /* invalidate pcr */ | 136 | if (violation) /* invalidate pcr */ |
136 | memset(digest, 0xff, sizeof(digest)); | 137 | memset(digest, 0xff, sizeof(digest)); |
137 | 138 | ||
138 | tpmresult = ima_pcr_extend(digest); | 139 | tpmresult = ima_pcr_extend(digest, entry->pcr); |
139 | if (tpmresult != 0) { | 140 | if (tpmresult != 0) { |
140 | snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", | 141 | snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", |
141 | tpmresult); | 142 | tpmresult); |
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h index 90bc57d796ec..24520b4ef3b0 100644 --- a/security/integrity/integrity.h +++ b/security/integrity/integrity.h | |||
@@ -103,6 +103,7 @@ struct integrity_iint_cache { | |||
103 | struct inode *inode; /* back pointer to inode in question */ | 103 | struct inode *inode; /* back pointer to inode in question */ |
104 | u64 version; /* track inode changes */ | 104 | u64 version; /* track inode changes */ |
105 | unsigned long flags; | 105 | unsigned long flags; |
106 | unsigned long measured_pcrs; | ||
106 | enum integrity_status ima_file_status:4; | 107 | enum integrity_status ima_file_status:4; |
107 | enum integrity_status ima_mmap_status:4; | 108 | enum integrity_status ima_mmap_status:4; |
108 | enum integrity_status ima_bprm_status:4; | 109 | enum integrity_status ima_bprm_status:4; |
diff --git a/security/keys/persistent.c b/security/keys/persistent.c index 2ef45b319dd9..1edc1f0a0ce2 100644 --- a/security/keys/persistent.c +++ b/security/keys/persistent.c | |||
@@ -114,7 +114,7 @@ found: | |||
114 | ret = key_link(key_ref_to_ptr(dest_ref), persistent); | 114 | ret = key_link(key_ref_to_ptr(dest_ref), persistent); |
115 | if (ret == 0) { | 115 | if (ret == 0) { |
116 | key_set_timeout(persistent, persistent_keyring_expiry); | 116 | key_set_timeout(persistent, persistent_keyring_expiry); |
117 | ret = persistent->serial; | 117 | ret = persistent->serial; |
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index a29e3554751e..43affcf10b22 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -442,7 +442,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx, | |||
442 | 442 | ||
443 | if (ctx->index_key.type == &key_type_keyring) | 443 | if (ctx->index_key.type == &key_type_keyring) |
444 | return ERR_PTR(-EPERM); | 444 | return ERR_PTR(-EPERM); |
445 | 445 | ||
446 | user = key_user_lookup(current_fsuid()); | 446 | user = key_user_lookup(current_fsuid()); |
447 | if (!user) | 447 | if (!user) |
448 | return ERR_PTR(-ENOMEM); | 448 | return ERR_PTR(-ENOMEM); |
diff --git a/security/security.c b/security/security.c index 709569305d32..c4bb47db30ee 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -700,18 +700,39 @@ int security_inode_killpriv(struct dentry *dentry) | |||
700 | 700 | ||
701 | int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) | 701 | int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) |
702 | { | 702 | { |
703 | struct security_hook_list *hp; | ||
704 | int rc; | ||
705 | |||
703 | if (unlikely(IS_PRIVATE(inode))) | 706 | if (unlikely(IS_PRIVATE(inode))) |
704 | return -EOPNOTSUPP; | 707 | return -EOPNOTSUPP; |
705 | return call_int_hook(inode_getsecurity, -EOPNOTSUPP, inode, name, | 708 | /* |
706 | buffer, alloc); | 709 | * Only one module will provide an attribute with a given name. |
710 | */ | ||
711 | list_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) { | ||
712 | rc = hp->hook.inode_getsecurity(inode, name, buffer, alloc); | ||
713 | if (rc != -EOPNOTSUPP) | ||
714 | return rc; | ||
715 | } | ||
716 | return -EOPNOTSUPP; | ||
707 | } | 717 | } |
708 | 718 | ||
709 | int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) | 719 | int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) |
710 | { | 720 | { |
721 | struct security_hook_list *hp; | ||
722 | int rc; | ||
723 | |||
711 | if (unlikely(IS_PRIVATE(inode))) | 724 | if (unlikely(IS_PRIVATE(inode))) |
712 | return -EOPNOTSUPP; | 725 | return -EOPNOTSUPP; |
713 | return call_int_hook(inode_setsecurity, -EOPNOTSUPP, inode, name, | 726 | /* |
714 | value, size, flags); | 727 | * Only one module will provide an attribute with a given name. |
728 | */ | ||
729 | list_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) { | ||
730 | rc = hp->hook.inode_setsecurity(inode, name, value, size, | ||
731 | flags); | ||
732 | if (rc != -EOPNOTSUPP) | ||
733 | return rc; | ||
734 | } | ||
735 | return -EOPNOTSUPP; | ||
715 | } | 736 | } |
716 | 737 | ||
717 | int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) | 738 | int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) |
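Editor's note: the open-coded loops above replace call_int_hook() because these two hooks need a different convention: every module is consulted until one of them claims the attribute name by returning something other than -EOPNOTSUPP. A standalone sketch of that convention follows; all names in it are made up for illustration and are not LSM APIs.

	#include <errno.h>
	#include <stdio.h>

	typedef int (*getsec_hook)(const char *name, char *buf, int len);

	static int hook_a(const char *name, char *buf, int len)
	{
		(void)name; (void)buf; (void)len;
		return -EOPNOTSUPP;		/* not my attribute */
	}

	static int hook_b(const char *name, char *buf, int len)
	{
		return snprintf(buf, len, "label-for-%s", name);
	}

	/* Only one module will provide an attribute with a given name. */
	static int call_getsecurity(getsec_hook *hooks, int n,
				    const char *name, char *buf, int len)
	{
		for (int i = 0; i < n; i++) {
			int rc = hooks[i](name, buf, len);

			if (rc != -EOPNOTSUPP)
				return rc;
		}
		return -EOPNOTSUPP;
	}

	int main(void)
	{
		getsec_hook hooks[] = { hook_a, hook_b };
		char buf[64];

		if (call_getsecurity(hooks, 2, "selinux", buf, sizeof(buf)) > 0)
			printf("%s\n", buf);
		return 0;
	}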
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 19be9d39c742..ec30880c4b98 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -4627,13 +4627,13 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
4627 | err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif, | 4627 | err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif, |
4628 | addrp, family, peer_sid, &ad); | 4628 | addrp, family, peer_sid, &ad); |
4629 | if (err) { | 4629 | if (err) { |
4630 | selinux_netlbl_err(skb, err, 0); | 4630 | selinux_netlbl_err(skb, family, err, 0); |
4631 | return err; | 4631 | return err; |
4632 | } | 4632 | } |
4633 | err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, | 4633 | err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, |
4634 | PEER__RECV, &ad); | 4634 | PEER__RECV, &ad); |
4635 | if (err) { | 4635 | if (err) { |
4636 | selinux_netlbl_err(skb, err, 0); | 4636 | selinux_netlbl_err(skb, family, err, 0); |
4637 | return err; | 4637 | return err; |
4638 | } | 4638 | } |
4639 | } | 4639 | } |
@@ -5001,7 +5001,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, | |||
5001 | err = selinux_inet_sys_rcv_skb(dev_net(indev), indev->ifindex, | 5001 | err = selinux_inet_sys_rcv_skb(dev_net(indev), indev->ifindex, |
5002 | addrp, family, peer_sid, &ad); | 5002 | addrp, family, peer_sid, &ad); |
5003 | if (err) { | 5003 | if (err) { |
5004 | selinux_netlbl_err(skb, err, 1); | 5004 | selinux_netlbl_err(skb, family, err, 1); |
5005 | return NF_DROP; | 5005 | return NF_DROP; |
5006 | } | 5006 | } |
5007 | } | 5007 | } |
@@ -5087,6 +5087,15 @@ static unsigned int selinux_ipv4_output(void *priv, | |||
5087 | return selinux_ip_output(skb, PF_INET); | 5087 | return selinux_ip_output(skb, PF_INET); |
5088 | } | 5088 | } |
5089 | 5089 | ||
5090 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
5091 | static unsigned int selinux_ipv6_output(void *priv, | ||
5092 | struct sk_buff *skb, | ||
5093 | const struct nf_hook_state *state) | ||
5094 | { | ||
5095 | return selinux_ip_output(skb, PF_INET6); | ||
5096 | } | ||
5097 | #endif /* IPV6 */ | ||
5098 | |||
5090 | static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, | 5099 | static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, |
5091 | int ifindex, | 5100 | int ifindex, |
5092 | u16 family) | 5101 | u16 family) |
@@ -6321,6 +6330,12 @@ static struct nf_hook_ops selinux_nf_ops[] = { | |||
6321 | .hooknum = NF_INET_FORWARD, | 6330 | .hooknum = NF_INET_FORWARD, |
6322 | .priority = NF_IP6_PRI_SELINUX_FIRST, | 6331 | .priority = NF_IP6_PRI_SELINUX_FIRST, |
6323 | }, | 6332 | }, |
6333 | { | ||
6334 | .hook = selinux_ipv6_output, | ||
6335 | .pf = NFPROTO_IPV6, | ||
6336 | .hooknum = NF_INET_LOCAL_OUT, | ||
6337 | .priority = NF_IP6_PRI_SELINUX_FIRST, | ||
6338 | }, | ||
6324 | #endif /* IPV6 */ | 6339 | #endif /* IPV6 */ |
6325 | }; | 6340 | }; |
6326 | 6341 | ||
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h index 8c59b8f150e8..75686d53df07 100644 --- a/security/selinux/include/netlabel.h +++ b/security/selinux/include/netlabel.h | |||
@@ -40,7 +40,8 @@ | |||
40 | #ifdef CONFIG_NETLABEL | 40 | #ifdef CONFIG_NETLABEL |
41 | void selinux_netlbl_cache_invalidate(void); | 41 | void selinux_netlbl_cache_invalidate(void); |
42 | 42 | ||
43 | void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway); | 43 | void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error, |
44 | int gateway); | ||
44 | 45 | ||
45 | void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec); | 46 | void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec); |
46 | void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec); | 47 | void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec); |
@@ -72,6 +73,7 @@ static inline void selinux_netlbl_cache_invalidate(void) | |||
72 | } | 73 | } |
73 | 74 | ||
74 | static inline void selinux_netlbl_err(struct sk_buff *skb, | 75 | static inline void selinux_netlbl_err(struct sk_buff *skb, |
76 | u16 family, | ||
75 | int error, | 77 | int error, |
76 | int gateway) | 78 | int gateway) |
77 | { | 79 | { |
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index 1f989a539fd4..aaba6677ee2e 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c | |||
@@ -54,6 +54,7 @@ | |||
54 | * | 54 | * |
55 | */ | 55 | */ |
56 | static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb, | 56 | static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb, |
57 | u16 family, | ||
57 | struct netlbl_lsm_secattr *secattr, | 58 | struct netlbl_lsm_secattr *secattr, |
58 | u32 *sid) | 59 | u32 *sid) |
59 | { | 60 | { |
@@ -63,7 +64,7 @@ static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb, | |||
63 | if (rc == 0 && | 64 | if (rc == 0 && |
64 | (secattr->flags & NETLBL_SECATTR_CACHEABLE) && | 65 | (secattr->flags & NETLBL_SECATTR_CACHEABLE) && |
65 | (secattr->flags & NETLBL_SECATTR_CACHE)) | 66 | (secattr->flags & NETLBL_SECATTR_CACHE)) |
66 | netlbl_cache_add(skb, secattr); | 67 | netlbl_cache_add(skb, family, secattr); |
67 | 68 | ||
68 | return rc; | 69 | return rc; |
69 | } | 70 | } |
@@ -151,9 +152,9 @@ void selinux_netlbl_cache_invalidate(void) | |||
151 | * present on the packet, NetLabel is smart enough to only act when it should. | 152 | * present on the packet, NetLabel is smart enough to only act when it should. |
152 | * | 153 | * |
153 | */ | 154 | */ |
154 | void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway) | 155 | void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error, int gateway) |
155 | { | 156 | { |
156 | netlbl_skbuff_err(skb, error, gateway); | 157 | netlbl_skbuff_err(skb, family, error, gateway); |
157 | } | 158 | } |
158 | 159 | ||
159 | /** | 160 | /** |
@@ -214,7 +215,8 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, | |||
214 | netlbl_secattr_init(&secattr); | 215 | netlbl_secattr_init(&secattr); |
215 | rc = netlbl_skbuff_getattr(skb, family, &secattr); | 216 | rc = netlbl_skbuff_getattr(skb, family, &secattr); |
216 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) | 217 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) |
217 | rc = selinux_netlbl_sidlookup_cached(skb, &secattr, sid); | 218 | rc = selinux_netlbl_sidlookup_cached(skb, family, |
219 | &secattr, sid); | ||
218 | else | 220 | else |
219 | *sid = SECSID_NULL; | 221 | *sid = SECSID_NULL; |
220 | *type = secattr.type; | 222 | *type = secattr.type; |
@@ -284,7 +286,7 @@ int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family) | |||
284 | int rc; | 286 | int rc; |
285 | struct netlbl_lsm_secattr secattr; | 287 | struct netlbl_lsm_secattr secattr; |
286 | 288 | ||
287 | if (family != PF_INET) | 289 | if (family != PF_INET && family != PF_INET6) |
288 | return 0; | 290 | return 0; |
289 | 291 | ||
290 | netlbl_secattr_init(&secattr); | 292 | netlbl_secattr_init(&secattr); |
@@ -333,7 +335,7 @@ int selinux_netlbl_socket_post_create(struct sock *sk, u16 family) | |||
333 | struct sk_security_struct *sksec = sk->sk_security; | 335 | struct sk_security_struct *sksec = sk->sk_security; |
334 | struct netlbl_lsm_secattr *secattr; | 336 | struct netlbl_lsm_secattr *secattr; |
335 | 337 | ||
336 | if (family != PF_INET) | 338 | if (family != PF_INET && family != PF_INET6) |
337 | return 0; | 339 | return 0; |
338 | 340 | ||
339 | secattr = selinux_netlbl_sock_genattr(sk); | 341 | secattr = selinux_netlbl_sock_genattr(sk); |
@@ -382,7 +384,8 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, | |||
382 | netlbl_secattr_init(&secattr); | 384 | netlbl_secattr_init(&secattr); |
383 | rc = netlbl_skbuff_getattr(skb, family, &secattr); | 385 | rc = netlbl_skbuff_getattr(skb, family, &secattr); |
384 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) | 386 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) |
385 | rc = selinux_netlbl_sidlookup_cached(skb, &secattr, &nlbl_sid); | 387 | rc = selinux_netlbl_sidlookup_cached(skb, family, |
388 | &secattr, &nlbl_sid); | ||
386 | else | 389 | else |
387 | nlbl_sid = SECINITSID_UNLABELED; | 390 | nlbl_sid = SECINITSID_UNLABELED; |
388 | netlbl_secattr_destroy(&secattr); | 391 | netlbl_secattr_destroy(&secattr); |
@@ -405,11 +408,26 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, | |||
405 | return 0; | 408 | return 0; |
406 | 409 | ||
407 | if (nlbl_sid != SECINITSID_UNLABELED) | 410 | if (nlbl_sid != SECINITSID_UNLABELED) |
408 | netlbl_skbuff_err(skb, rc, 0); | 411 | netlbl_skbuff_err(skb, family, rc, 0); |
409 | return rc; | 412 | return rc; |
410 | } | 413 | } |
411 | 414 | ||
412 | /** | 415 | /** |
416 | * selinux_netlbl_option - Is this a NetLabel option | ||
417 | * @level: the socket level or protocol | ||
418 | * @optname: the socket option name | ||
419 | * | ||
420 | * Description: | ||
421 | * Returns true if @level and @optname refer to a NetLabel option. | ||
422 | * Helper for selinux_netlbl_socket_setsockopt(). | ||
423 | */ | ||
424 | static inline int selinux_netlbl_option(int level, int optname) | ||
425 | { | ||
426 | return (level == IPPROTO_IP && optname == IP_OPTIONS) || | ||
427 | (level == IPPROTO_IPV6 && optname == IPV6_HOPOPTS); | ||
428 | } | ||
429 | |||
430 | /** | ||
413 | * selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel | 431 | * selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel |
414 | * @sock: the socket | 432 | * @sock: the socket |
415 | * @level: the socket level or protocol | 433 | * @level: the socket level or protocol |
@@ -431,7 +449,7 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock, | |||
431 | struct sk_security_struct *sksec = sk->sk_security; | 449 | struct sk_security_struct *sksec = sk->sk_security; |
432 | struct netlbl_lsm_secattr secattr; | 450 | struct netlbl_lsm_secattr secattr; |
433 | 451 | ||
434 | if (level == IPPROTO_IP && optname == IP_OPTIONS && | 452 | if (selinux_netlbl_option(level, optname) && |
435 | (sksec->nlbl_state == NLBL_LABELED || | 453 | (sksec->nlbl_state == NLBL_LABELED || |
436 | sksec->nlbl_state == NLBL_CONNLABELED)) { | 454 | sksec->nlbl_state == NLBL_CONNLABELED)) { |
437 | netlbl_secattr_init(&secattr); | 455 | netlbl_secattr_init(&secattr); |
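Editor's note: with CALIPSO, the NetLabel state rides in the IPv6 hop-by-hop options header, so the widened check above now also intercepts attempts to rewrite IPV6_HOPOPTS on a labeled socket, just as it already did for IPv4 IP_OPTIONS. A sketch of the kind of call being guarded against (plain userspace socket API, nothing SELinux-specific) is:

	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET6, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		/* Clearing the hop-by-hop options would strip the CALIPSO label;
		 * on a labeled socket the hook above denies this. */
		setsockopt(fd, IPPROTO_IPV6, IPV6_HOPOPTS, NULL, 0);
		close(fd);
		return 0;
	}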
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 1b1fd27de632..0765c5b053b5 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -1347,7 +1347,7 @@ static ssize_t sel_write_avc_cache_threshold(struct file *file, | |||
1347 | { | 1347 | { |
1348 | char *page; | 1348 | char *page; |
1349 | ssize_t ret; | 1349 | ssize_t ret; |
1350 | int new_value; | 1350 | unsigned int new_value; |
1351 | 1351 | ||
1352 | ret = task_has_security(current, SECURITY__SETSECPARAM); | 1352 | ret = task_has_security(current, SECURITY__SETSECPARAM); |
1353 | if (ret) | 1353 | if (ret) |
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index 57644b1dc42e..894b6cdc11c5 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c | |||
@@ -165,7 +165,7 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap, | |||
165 | e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC); | 165 | e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC); |
166 | if (e_iter == NULL) | 166 | if (e_iter == NULL) |
167 | goto netlbl_import_failure; | 167 | goto netlbl_import_failure; |
168 | e_iter->startbit = offset & ~(EBITMAP_SIZE - 1); | 168 | e_iter->startbit = offset - (offset % EBITMAP_SIZE); |
169 | if (e_prev == NULL) | 169 | if (e_prev == NULL) |
170 | ebmap->node = e_iter; | 170 | ebmap->node = e_iter; |
171 | else | 171 | else |
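Editor's note: the one-line ebitmap change swaps a power-of-two masking trick for a plain modulo. The two are only equivalent when EBITMAP_SIZE is a power of two; the modulo form rounds down correctly for any size. A tiny illustration with a hypothetical non-power-of-two size:

	#include <stdio.h>

	int main(void)
	{
		unsigned int size = 48;		/* hypothetical unit size, not 2^n */
		unsigned int offset = 50;

		printf("mask  : %u\n", offset & ~(size - 1));	/* 16: wrong */
		printf("modulo: %u\n", offset - (offset % size)); /* 48: correct */
		return 0;
	}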
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 89df64672b89..082b20c78363 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -543,7 +543,7 @@ static void type_attribute_bounds_av(struct context *scontext, | |||
543 | struct av_decision *avd) | 543 | struct av_decision *avd) |
544 | { | 544 | { |
545 | struct context lo_scontext; | 545 | struct context lo_scontext; |
546 | struct context lo_tcontext; | 546 | struct context lo_tcontext, *tcontextp = tcontext; |
547 | struct av_decision lo_avd; | 547 | struct av_decision lo_avd; |
548 | struct type_datum *source; | 548 | struct type_datum *source; |
549 | struct type_datum *target; | 549 | struct type_datum *target; |
@@ -553,67 +553,41 @@ static void type_attribute_bounds_av(struct context *scontext, | |||
553 | scontext->type - 1); | 553 | scontext->type - 1); |
554 | BUG_ON(!source); | 554 | BUG_ON(!source); |
555 | 555 | ||
556 | if (!source->bounds) | ||
557 | return; | ||
558 | |||
556 | target = flex_array_get_ptr(policydb.type_val_to_struct_array, | 559 | target = flex_array_get_ptr(policydb.type_val_to_struct_array, |
557 | tcontext->type - 1); | 560 | tcontext->type - 1); |
558 | BUG_ON(!target); | 561 | BUG_ON(!target); |
559 | 562 | ||
560 | if (source->bounds) { | 563 | memset(&lo_avd, 0, sizeof(lo_avd)); |
561 | memset(&lo_avd, 0, sizeof(lo_avd)); | ||
562 | |||
563 | memcpy(&lo_scontext, scontext, sizeof(lo_scontext)); | ||
564 | lo_scontext.type = source->bounds; | ||
565 | 564 | ||
566 | context_struct_compute_av(&lo_scontext, | 565 | memcpy(&lo_scontext, scontext, sizeof(lo_scontext)); |
567 | tcontext, | 566 | lo_scontext.type = source->bounds; |
568 | tclass, | ||
569 | &lo_avd, | ||
570 | NULL); | ||
571 | if ((lo_avd.allowed & avd->allowed) == avd->allowed) | ||
572 | return; /* no masked permission */ | ||
573 | masked = ~lo_avd.allowed & avd->allowed; | ||
574 | } | ||
575 | 567 | ||
576 | if (target->bounds) { | 568 | if (target->bounds) { |
577 | memset(&lo_avd, 0, sizeof(lo_avd)); | ||
578 | |||
579 | memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext)); | 569 | memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext)); |
580 | lo_tcontext.type = target->bounds; | 570 | lo_tcontext.type = target->bounds; |
581 | 571 | tcontextp = &lo_tcontext; | |
582 | context_struct_compute_av(scontext, | ||
583 | &lo_tcontext, | ||
584 | tclass, | ||
585 | &lo_avd, | ||
586 | NULL); | ||
587 | if ((lo_avd.allowed & avd->allowed) == avd->allowed) | ||
588 | return; /* no masked permission */ | ||
589 | masked = ~lo_avd.allowed & avd->allowed; | ||
590 | } | 572 | } |
591 | 573 | ||
592 | if (source->bounds && target->bounds) { | 574 | context_struct_compute_av(&lo_scontext, |
593 | memset(&lo_avd, 0, sizeof(lo_avd)); | 575 | tcontextp, |
594 | /* | 576 | tclass, |
595 | * lo_scontext and lo_tcontext are already | 577 | &lo_avd, |
596 | * set up. | 578 | NULL); |
597 | */ | ||
598 | 579 | ||
599 | context_struct_compute_av(&lo_scontext, | 580 | masked = ~lo_avd.allowed & avd->allowed; |
600 | &lo_tcontext, | ||
601 | tclass, | ||
602 | &lo_avd, | ||
603 | NULL); | ||
604 | if ((lo_avd.allowed & avd->allowed) == avd->allowed) | ||
605 | return; /* no masked permission */ | ||
606 | masked = ~lo_avd.allowed & avd->allowed; | ||
607 | } | ||
608 | 581 | ||
609 | if (masked) { | 582 | if (likely(!masked)) |
610 | /* mask violated permissions */ | 583 | return; /* no masked permission */ |
611 | avd->allowed &= ~masked; | ||
612 | 584 | ||
613 | /* audit masked permissions */ | 585 | /* mask violated permissions */ |
614 | security_dump_masked_av(scontext, tcontext, | 586 | avd->allowed &= ~masked; |
615 | tclass, masked, "bounds"); | 587 | |
616 | } | 588 | /* audit masked permissions */ |
589 | security_dump_masked_av(scontext, tcontext, | ||
590 | tclass, masked, "bounds"); | ||
617 | } | 591 | } |
618 | 592 | ||
619 | /* | 593 | /* |
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index b75634dbf53b..87a9741b0d02 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -2255,6 +2255,9 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info, | |||
2255 | struct smack_known *tkp = smk_of_task_struct(p); | 2255 | struct smack_known *tkp = smk_of_task_struct(p); |
2256 | int rc; | 2256 | int rc; |
2257 | 2257 | ||
2258 | if (!sig) | ||
2259 | return 0; /* null signal; existence test */ | ||
2260 | |||
2258 | smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); | 2261 | smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); |
2259 | smk_ad_setfield_u_tsk(&ad, p); | 2262 | smk_ad_setfield_u_tsk(&ad, p); |
2260 | /* | 2263 | /* |
@@ -4020,7 +4023,7 @@ access_check: | |||
4020 | rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in, | 4023 | rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in, |
4021 | MAY_WRITE, rc); | 4024 | MAY_WRITE, rc); |
4022 | if (rc != 0) | 4025 | if (rc != 0) |
4023 | netlbl_skbuff_err(skb, rc, 0); | 4026 | netlbl_skbuff_err(skb, sk->sk_family, rc, 0); |
4024 | break; | 4027 | break; |
4025 | #if IS_ENABLED(CONFIG_IPV6) | 4028 | #if IS_ENABLED(CONFIG_IPV6) |
4026 | case PF_INET6: | 4029 | case PF_INET6: |
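Editor's note: the smack_task_kill() hunk above lets signal 0 through without an access check because signal 0 is the standard "existence test": it performs the permission and liveness checks but delivers nothing. A minimal userspace illustration:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		pid_t pid = 1;	/* init/systemd: always present */

		/* kill(pid, 0) sends no signal; it only reports whether the
		 * target exists (EPERM still implies existence). */
		if (kill(pid, 0) == 0 || errno == EPERM)
			printf("pid %d exists\n", pid);
		else
			printf("pid %d not found\n", pid);
		return 0;
	}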
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index 986a6a756868..540bc29e1b5a 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c | |||
@@ -645,11 +645,6 @@ void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register) | |||
645 | } | 645 | } |
646 | } | 646 | } |
647 | spin_unlock(&tomoyo_io_buffer_list_lock); | 647 | spin_unlock(&tomoyo_io_buffer_list_lock); |
648 | if (is_write) { | 648 | if (is_write) |
649 | struct task_struct *task = kthread_create(tomoyo_gc_thread, | 649 | kthread_run(tomoyo_gc_thread, NULL, "GC for TOMOYO"); |
650 | NULL, | ||
651 | "GC for TOMOYO"); | ||
652 | if (!IS_ERR(task)) | ||
653 | wake_up_process(task); | ||
654 | } | ||
655 | } | 650 | } |
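Editor's note: the TOMOYO garbage-collector change is a straight simplification, since kthread_run() is (roughly, paraphrased from the kernel's kthread.h, not quoted verbatim) a create-then-wake wrapper that only wakes the thread when creation succeeded, exactly what the removed open-coded version did:

	/* sketch of the kthread_run() pattern; kernel-only, not standalone */
	#define kthread_run_sketch(threadfn, data, namefmt, ...)		\
	({									\
		struct task_struct *__k						\
			= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
		if (!IS_ERR(__k))						\
			wake_up_process(__k);					\
		__k;								\
	})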
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 2e58549b2f02..03f1fa495d74 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
@@ -1021,8 +1021,8 @@ void tracer_stop(int sig) | |||
1021 | typedef void tracer_func_t(struct __test_metadata *_metadata, | 1021 | typedef void tracer_func_t(struct __test_metadata *_metadata, |
1022 | pid_t tracee, int status, void *args); | 1022 | pid_t tracee, int status, void *args); |
1023 | 1023 | ||
1024 | void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, | 1024 | void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, |
1025 | tracer_func_t tracer_func, void *args) | 1025 | tracer_func_t tracer_func, void *args, bool ptrace_syscall) |
1026 | { | 1026 | { |
1027 | int ret = -1; | 1027 | int ret = -1; |
1028 | struct sigaction action = { | 1028 | struct sigaction action = { |
@@ -1042,12 +1042,16 @@ void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, | |||
1042 | /* Wait for attach stop */ | 1042 | /* Wait for attach stop */ |
1043 | wait(NULL); | 1043 | wait(NULL); |
1044 | 1044 | ||
1045 | ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, PTRACE_O_TRACESECCOMP); | 1045 | ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ? |
1046 | PTRACE_O_TRACESYSGOOD : | ||
1047 | PTRACE_O_TRACESECCOMP); | ||
1046 | ASSERT_EQ(0, ret) { | 1048 | ASSERT_EQ(0, ret) { |
1047 | TH_LOG("Failed to set PTRACE_O_TRACESECCOMP"); | 1049 | TH_LOG("Failed to set PTRACE_O_TRACESECCOMP"); |
1048 | kill(tracee, SIGKILL); | 1050 | kill(tracee, SIGKILL); |
1049 | } | 1051 | } |
1050 | ptrace(PTRACE_CONT, tracee, NULL, 0); | 1052 | ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, |
1053 | tracee, NULL, 0); | ||
1054 | ASSERT_EQ(0, ret); | ||
1051 | 1055 | ||
1052 | /* Unblock the tracee */ | 1056 | /* Unblock the tracee */ |
1053 | ASSERT_EQ(1, write(fd, "A", 1)); | 1057 | ASSERT_EQ(1, write(fd, "A", 1)); |
@@ -1063,12 +1067,13 @@ void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, | |||
1063 | /* Child is dead. Time to go. */ | 1067 | /* Child is dead. Time to go. */ |
1064 | return; | 1068 | return; |
1065 | 1069 | ||
1066 | /* Make sure this is a seccomp event. */ | 1070 | /* Check if this is a seccomp event. */ |
1067 | ASSERT_EQ(true, IS_SECCOMP_EVENT(status)); | 1071 | ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status)); |
1068 | 1072 | ||
1069 | tracer_func(_metadata, tracee, status, args); | 1073 | tracer_func(_metadata, tracee, status, args); |
1070 | 1074 | ||
1071 | ret = ptrace(PTRACE_CONT, tracee, NULL, NULL); | 1075 | ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, |
1076 | tracee, NULL, 0); | ||
1072 | ASSERT_EQ(0, ret); | 1077 | ASSERT_EQ(0, ret); |
1073 | } | 1078 | } |
1074 | /* Directly report the status of our test harness results. */ | 1079 | /* Directly report the status of our test harness results. */ |
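Editor's note: start_tracer() now serves two kinds of tracers, seccomp-event (PTRACE_O_TRACESECCOMP) and plain syscall tracing (PTRACE_O_TRACESYSGOOD), and the ASSERT on IS_SECCOMP_EVENT() flips accordingly. The selftest's IS_SECCOMP_EVENT() macro is its own; the sketch below shows the generic wait-status decoding from ptrace(2) that distinguishes the two stop types, with the PTRACE_EVENT_SECCOMP fallback value assumed from <linux/ptrace.h>.

	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>

	#ifndef PTRACE_EVENT_SECCOMP
	#define PTRACE_EVENT_SECCOMP 7	/* value from <linux/ptrace.h> */
	#endif

	/* A seccomp ptrace-event stop encodes the event number above the signal. */
	int is_seccomp_event(int status)
	{
		return (status >> 8) == (SIGTRAP | (PTRACE_EVENT_SECCOMP << 8));
	}

	/* With PTRACE_O_TRACESYSGOOD, syscall stops report SIGTRAP with bit 7 set. */
	int is_syscall_stop(int status)
	{
		return WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80);
	}

	int main(void)
	{
		/* Synthetic wait statuses for demonstration only. */
		int seccomp_status = ((SIGTRAP | (PTRACE_EVENT_SECCOMP << 8)) << 8) | 0x7f;
		int syscall_status = ((SIGTRAP | 0x80) << 8) | 0x7f;

		printf("%d %d\n", is_seccomp_event(seccomp_status),
		       is_syscall_stop(syscall_status));
		return 0;
	}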
@@ -1079,7 +1084,7 @@ void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, | |||
1079 | void cont_handler(int num) | 1084 | void cont_handler(int num) |
1080 | { } | 1085 | { } |
1081 | pid_t setup_trace_fixture(struct __test_metadata *_metadata, | 1086 | pid_t setup_trace_fixture(struct __test_metadata *_metadata, |
1082 | tracer_func_t func, void *args) | 1087 | tracer_func_t func, void *args, bool ptrace_syscall) |
1083 | { | 1088 | { |
1084 | char sync; | 1089 | char sync; |
1085 | int pipefd[2]; | 1090 | int pipefd[2]; |
@@ -1095,7 +1100,8 @@ pid_t setup_trace_fixture(struct __test_metadata *_metadata, | |||
1095 | signal(SIGALRM, cont_handler); | 1100 | signal(SIGALRM, cont_handler); |
1096 | if (tracer_pid == 0) { | 1101 | if (tracer_pid == 0) { |
1097 | close(pipefd[0]); | 1102 | close(pipefd[0]); |
1098 | tracer(_metadata, pipefd[1], tracee, func, args); | 1103 | start_tracer(_metadata, pipefd[1], tracee, func, args, |
1104 | ptrace_syscall); | ||
1099 | syscall(__NR_exit, 0); | 1105 | syscall(__NR_exit, 0); |
1100 | } | 1106 | } |
1101 | close(pipefd[1]); | 1107 | close(pipefd[1]); |
@@ -1177,7 +1183,7 @@ FIXTURE_SETUP(TRACE_poke) | |||
1177 | 1183 | ||
1178 | /* Launch tracer. */ | 1184 | /* Launch tracer. */ |
1179 | self->tracer = setup_trace_fixture(_metadata, tracer_poke, | 1185 | self->tracer = setup_trace_fixture(_metadata, tracer_poke, |
1180 | &self->tracer_args); | 1186 | &self->tracer_args, false); |
1181 | } | 1187 | } |
1182 | 1188 | ||
1183 | FIXTURE_TEARDOWN(TRACE_poke) | 1189 | FIXTURE_TEARDOWN(TRACE_poke) |
@@ -1399,6 +1405,29 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, | |||
1399 | 1405 | ||
1400 | } | 1406 | } |
1401 | 1407 | ||
1408 | void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, | ||
1409 | int status, void *args) | ||
1410 | { | ||
1411 | int ret, nr; | ||
1412 | unsigned long msg; | ||
1413 | static bool entry; | ||
1414 | |||
1415 | /* Make sure we got an empty message. */ | ||
1416 | ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); | ||
1417 | EXPECT_EQ(0, ret); | ||
1418 | EXPECT_EQ(0, msg); | ||
1419 | |||
1420 | /* The only way to tell PTRACE_SYSCALL entry/exit is by counting. */ | ||
1421 | entry = !entry; | ||
1422 | if (!entry) | ||
1423 | return; | ||
1424 | |||
1425 | nr = get_syscall(_metadata, tracee); | ||
1426 | |||
1427 | if (nr == __NR_getpid) | ||
1428 | change_syscall(_metadata, tracee, __NR_getppid); | ||
1429 | } | ||
1430 | |||
1402 | FIXTURE_DATA(TRACE_syscall) { | 1431 | FIXTURE_DATA(TRACE_syscall) { |
1403 | struct sock_fprog prog; | 1432 | struct sock_fprog prog; |
1404 | pid_t tracer, mytid, mypid, parent; | 1433 | pid_t tracer, mytid, mypid, parent; |
@@ -1440,7 +1469,8 @@ FIXTURE_SETUP(TRACE_syscall) | |||
1440 | ASSERT_NE(self->parent, self->mypid); | 1469 | ASSERT_NE(self->parent, self->mypid); |
1441 | 1470 | ||
1442 | /* Launch tracer. */ | 1471 | /* Launch tracer. */ |
1443 | self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL); | 1472 | self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL, |
1473 | false); | ||
1444 | } | 1474 | } |
1445 | 1475 | ||
1446 | FIXTURE_TEARDOWN(TRACE_syscall) | 1476 | FIXTURE_TEARDOWN(TRACE_syscall) |
@@ -1500,6 +1530,130 @@ TEST_F(TRACE_syscall, syscall_dropped) | |||
1500 | EXPECT_NE(self->mytid, syscall(__NR_gettid)); | 1530 | EXPECT_NE(self->mytid, syscall(__NR_gettid)); |
1501 | } | 1531 | } |
1502 | 1532 | ||
1533 | TEST_F(TRACE_syscall, skip_after_RET_TRACE) | ||
1534 | { | ||
1535 | struct sock_filter filter[] = { | ||
1536 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
1537 | offsetof(struct seccomp_data, nr)), | ||
1538 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), | ||
1539 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), | ||
1540 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
1541 | }; | ||
1542 | struct sock_fprog prog = { | ||
1543 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
1544 | .filter = filter, | ||
1545 | }; | ||
1546 | long ret; | ||
1547 | |||
1548 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1549 | ASSERT_EQ(0, ret); | ||
1550 | |||
1551 | /* Install fixture filter. */ | ||
1552 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); | ||
1553 | ASSERT_EQ(0, ret); | ||
1554 | |||
1555 | /* Install "errno on getppid" filter. */ | ||
1556 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); | ||
1557 | ASSERT_EQ(0, ret); | ||
1558 | |||
1559 | /* Tracer will redirect getpid to getppid, and we should see EPERM. */ | ||
1560 | EXPECT_EQ(-1, syscall(__NR_getpid)); | ||
1561 | EXPECT_EQ(EPERM, errno); | ||
1562 | } | ||
1563 | |||
1564 | TEST_F_SIGNAL(TRACE_syscall, kill_after_RET_TRACE, SIGSYS) | ||
1565 | { | ||
1566 | struct sock_filter filter[] = { | ||
1567 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
1568 | offsetof(struct seccomp_data, nr)), | ||
1569 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), | ||
1570 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), | ||
1571 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
1572 | }; | ||
1573 | struct sock_fprog prog = { | ||
1574 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
1575 | .filter = filter, | ||
1576 | }; | ||
1577 | long ret; | ||
1578 | |||
1579 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1580 | ASSERT_EQ(0, ret); | ||
1581 | |||
1582 | /* Install fixture filter. */ | ||
1583 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); | ||
1584 | ASSERT_EQ(0, ret); | ||
1585 | |||
1586 | /* Install "death on getppid" filter. */ | ||
1587 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); | ||
1588 | ASSERT_EQ(0, ret); | ||
1589 | |||
1590 | /* Tracer will redirect getpid to getppid, and we should die. */ | ||
1591 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); | ||
1592 | } | ||
1593 | |||
1594 | TEST_F(TRACE_syscall, skip_after_ptrace) | ||
1595 | { | ||
1596 | struct sock_filter filter[] = { | ||
1597 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
1598 | offsetof(struct seccomp_data, nr)), | ||
1599 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), | ||
1600 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), | ||
1601 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
1602 | }; | ||
1603 | struct sock_fprog prog = { | ||
1604 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
1605 | .filter = filter, | ||
1606 | }; | ||
1607 | long ret; | ||
1608 | |||
1609 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ | ||
1610 | teardown_trace_fixture(_metadata, self->tracer); | ||
1611 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, | ||
1612 | true); | ||
1613 | |||
1614 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1615 | ASSERT_EQ(0, ret); | ||
1616 | |||
1617 | /* Install "errno on getppid" filter. */ | ||
1618 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); | ||
1619 | ASSERT_EQ(0, ret); | ||
1620 | |||
1621 | /* Tracer will redirect getpid to getppid, and we should see EPERM. */ | ||
1622 | EXPECT_EQ(-1, syscall(__NR_getpid)); | ||
1623 | EXPECT_EQ(EPERM, errno); | ||
1624 | } | ||
1625 | |||
1626 | TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS) | ||
1627 | { | ||
1628 | struct sock_filter filter[] = { | ||
1629 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
1630 | offsetof(struct seccomp_data, nr)), | ||
1631 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), | ||
1632 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), | ||
1633 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
1634 | }; | ||
1635 | struct sock_fprog prog = { | ||
1636 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
1637 | .filter = filter, | ||
1638 | }; | ||
1639 | long ret; | ||
1640 | |||
1641 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ | ||
1642 | teardown_trace_fixture(_metadata, self->tracer); | ||
1643 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, | ||
1644 | true); | ||
1645 | |||
1646 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1647 | ASSERT_EQ(0, ret); | ||
1648 | |||
1649 | /* Install "death on getppid" filter. */ | ||
1650 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); | ||
1651 | ASSERT_EQ(0, ret); | ||
1652 | |||
1653 | /* Tracer will redirect getpid to getppid, and we should die. */ | ||
1654 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); | ||
1655 | } | ||
1656 | |||
1503 | #ifndef __NR_seccomp | 1657 | #ifndef __NR_seccomp |
1504 | # if defined(__i386__) | 1658 | # if defined(__i386__) |
1505 | # define __NR_seccomp 354 | 1659 | # define __NR_seccomp 354 |