author:    David S. Miller <davem@davemloft.net>  2019-04-27 16:52:22 -0400
committer: David S. Miller <davem@davemloft.net>  2019-04-27 16:52:54 -0400
commit:    c7881b4a97e21b617b8243094dfa4b62028b956c
tree:      ab34b48256309a918c45e6dfa87112a2926e0cd1
parent:    30e5a9a5ba853b896250f0665a2e10bbafa2f6bc
parent:    63a1c95f3fe48b4e9fe0c261b376e5e527b71b25
Merge branch 'net-tls-small-code-cleanup'
Jakub Kicinski says:
====================
net/tls: small code cleanup
This small patch set cleans up tls (mostly the offload parts).
Other than avoiding unnecessary error messages, there are no
functional changes here.
v2 (Saeed):
- fix up Review tags;
- remove the warning on failure completely.
====================
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/netdevice.h | 23
-rw-r--r--  include/net/tls.h         | 21
-rw-r--r--  net/tls/tls_device.c      | 44
3 files changed, 38 insertions, 50 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c46d218a0456..44b47e9df94a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,34 +914,13 @@ struct xfrmdev_ops {
 };
 #endif
 
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-enum tls_offload_ctx_dir {
-	TLS_OFFLOAD_CTX_DIR_RX,
-	TLS_OFFLOAD_CTX_DIR_TX,
-};
-
-struct tls_crypto_info;
-struct tls_context;
-
-struct tlsdev_ops {
-	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
-			   enum tls_offload_ctx_dir direction,
-			   struct tls_crypto_info *crypto_info,
-			   u32 start_offload_tcp_sn);
-	void (*tls_dev_del)(struct net_device *netdev,
-			    struct tls_context *ctx,
-			    enum tls_offload_ctx_dir direction);
-	void (*tls_dev_resync_rx)(struct net_device *netdev,
-				  struct sock *sk, u32 seq, u64 rcd_sn);
-};
-#endif
-
 struct dev_ifalias {
 	struct rcu_head rcuhead;
 	char ifalias[];
 };
 
 struct devlink;
+struct tlsdev_ops;
 
 /*
  * This structure defines the management hooks for network devices.
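With the definitions moved out of netdevice.h, struct net_device only needs the
forward declaration of struct tlsdev_ops for its ops pointer; a driver that
actually implements TLS offload picks up the full definition from <net/tls.h>.
Below is a minimal sketch of what such a driver-side ops table could look like;
the mydrv_* names are hypothetical and not taken from any in-tree driver.

#include <linux/netdevice.h>
#include <net/tls.h>		/* full struct tlsdev_ops definition now lives here */

/* Hypothetical driver callbacks; the signatures follow the tlsdev_ops
 * declaration moved into include/net/tls.h by this series.
 */
static int mydrv_tls_dev_add(struct net_device *netdev, struct sock *sk,
			     enum tls_offload_ctx_dir direction,
			     struct tls_crypto_info *crypto_info,
			     u32 start_offload_tcp_sn)
{
	/* program the connection's crypto state into the hardware here */
	return -EOPNOTSUPP;
}

static void mydrv_tls_dev_del(struct net_device *netdev,
			      struct tls_context *ctx,
			      enum tls_offload_ctx_dir direction)
{
	/* release the hardware state held for this connection */
}

static void mydrv_tls_dev_resync_rx(struct net_device *netdev,
				    struct sock *sk, u32 seq, u64 rcd_sn)
{
	/* re-synchronize the device's RX record tracking at seq/rcd_sn */
}

static const struct tlsdev_ops mydrv_tlsdev_ops = {
	.tls_dev_add	   = mydrv_tls_dev_add,
	.tls_dev_del	   = mydrv_tls_dev_del,
	.tls_dev_resync_rx = mydrv_tls_dev_resync_rx,
};

A driver would point netdev->tlsdev_ops at such a table and advertise
NETIF_F_HW_TLS_RX (and/or the TX feature) in netdev->features, which is
exactly what tls_set_device_offload_rx() checks further down in this series.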
diff --git a/include/net/tls.h b/include/net/tls.h
index d9d0ac66f040..39ea62f0c1f6 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -277,6 +277,23 @@ struct tls_context {
 	void (*unhash)(struct sock *sk);
 };
 
+enum tls_offload_ctx_dir {
+	TLS_OFFLOAD_CTX_DIR_RX,
+	TLS_OFFLOAD_CTX_DIR_TX,
+};
+
+struct tlsdev_ops {
+	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
+			   enum tls_offload_ctx_dir direction,
+			   struct tls_crypto_info *crypto_info,
+			   u32 start_offload_tcp_sn);
+	void (*tls_dev_del)(struct net_device *netdev,
+			    struct tls_context *ctx,
+			    enum tls_offload_ctx_dir direction);
+	void (*tls_dev_resync_rx)(struct net_device *netdev,
+				  struct sock *sk, u32 seq, u64 rcd_sn);
+};
+
 struct tls_offload_context_rx {
 	/* sw must be the first member of tls_offload_context_rx */
 	struct tls_sw_context_rx sw;
@@ -317,7 +334,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
 			int offset, size_t size, int flags);
-void tls_device_sk_destruct(struct sock *sk);
 void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
@@ -336,7 +352,6 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec)
 	return rec->end_seq - rec->len;
 }
 
-void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
 int tls_push_sg(struct sock *sk, struct tls_context *ctx,
 		struct scatterlist *sg, u16 first_offset,
 		int flags);
@@ -547,7 +562,7 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 
-	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
 }
 
 
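The last hunk above changes which side of the RX resync handshake does the byte
swap: the request helper now converts the wire-order sequence with ntohl() when
it stores resync_req, so the reader in tls_device.c (next file) compares plain
host-order values. A hedged sketch of the driver-side call is below, assuming
the driver has the record's TCP sequence in network byte order (e.g. straight
from the TCP header); mydrv_request_rx_resync is a hypothetical name.

#include <linux/tcp.h>
#include <net/tls.h>

/* Hypothetical driver helper: hardware lost record sync at the TCP
 * sequence carried in this header, so ask the stack to resync.
 */
static void mydrv_request_rx_resync(struct sock *sk, const struct tcphdr *th)
{
	/* th->seq is __be32; after this change the helper itself does the
	 * ntohl(), so resync_req ends up holding a host-order sequence and
	 * handle_device_resync() no longer needs to byte-swap it.
	 */
	tls_offload_rx_resync_request(sk, th->seq);
}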
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index cc0256939eb6..26f26e71ef3f 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -89,22 +89,6 @@ static void tls_device_gc_task(struct work_struct *work)
 	}
 }
 
-static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
-			      struct net_device *netdev)
-{
-	if (sk->sk_destruct != tls_device_sk_destruct) {
-		refcount_set(&ctx->refcount, 1);
-		dev_hold(netdev);
-		ctx->netdev = netdev;
-		spin_lock_irq(&tls_device_lock);
-		list_add_tail(&ctx->list, &tls_device_list);
-		spin_unlock_irq(&tls_device_lock);
-
-		ctx->sk_destruct = sk->sk_destruct;
-		sk->sk_destruct = tls_device_sk_destruct;
-	}
-}
-
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
 	unsigned long flags;
@@ -199,7 +183,7 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
  * socket and no in-flight SKBs associated with this
  * socket, so it is safe to free all the resources.
  */
-void tls_device_sk_destruct(struct sock *sk)
+static void tls_device_sk_destruct(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
@@ -217,7 +201,6 @@ void tls_device_sk_destruct(struct sock *sk)
 	if (refcount_dec_and_test(&tls_ctx->refcount))
 		tls_device_queue_ctx_destruction(tls_ctx);
 }
-EXPORT_SYMBOL(tls_device_sk_destruct);
 
 void tls_device_free_resources_tx(struct sock *sk)
 {
@@ -584,7 +567,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 
 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
 	resync_req = atomic64_read(&rx_ctx->resync_req);
-	req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
 	is_req_pending = resync_req;
 
 	if (unlikely(is_req_pending) && req_seq == seq &&
@@ -682,6 +665,22 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
 		tls_device_reencrypt(sk, skb);
 }
 
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+			      struct net_device *netdev)
+{
+	if (sk->sk_destruct != tls_device_sk_destruct) {
+		refcount_set(&ctx->refcount, 1);
+		dev_hold(netdev);
+		ctx->netdev = netdev;
+		spin_lock_irq(&tls_device_lock);
+		list_add_tail(&ctx->list, &tls_device_list);
+		spin_unlock_irq(&tls_device_lock);
+
+		ctx->sk_destruct = sk->sk_destruct;
+		sk->sk_destruct = tls_device_sk_destruct;
+	}
+}
+
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
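tls_device_attach() is moved, unchanged, below tls_device_sk_destruct() so that
the now-static destructor is visible at the point of comparison without a
forward declaration. The pattern it implements is a plain save-and-chain of
sk_destruct; below is a minimal stand-alone sketch of that pattern (plain C,
not kernel code, with made-up names).

#include <stdio.h>

struct fake_sock {
	void (*sk_destruct)(struct fake_sock *sk);
};

static void (*saved_destruct)(struct fake_sock *sk);

static void orig_destruct(struct fake_sock *sk)
{
	printf("original destructor\n");
}

static void tls_like_destruct(struct fake_sock *sk)
{
	if (saved_destruct)
		saved_destruct(sk);	/* chain to the saved destructor */
	printf("release TLS offload state\n");
}

static void attach_once(struct fake_sock *sk)
{
	if (sk->sk_destruct != tls_like_destruct) {	/* attach only once */
		saved_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_like_destruct;
	}
}

int main(void)
{
	struct fake_sock sk = { .sk_destruct = orig_destruct };

	attach_once(&sk);
	attach_once(&sk);	/* second call is a no-op */
	sk.sk_destruct(&sk);	/* runs the original destructor, then TLS cleanup */
	return 0;
}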
@@ -865,8 +864,6 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
-		pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
-				   __func__, netdev->name);
 		rc = -ENOTSUPP;
 		goto release_netdev;
 	}
@@ -894,11 +891,8 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
 					     &ctx->crypto_recv.info,
 					     tcp_sk(sk)->copied_seq);
-	if (rc) {
-		pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
-				   __func__);
+	if (rc)
 		goto free_sw_resources;
-	}
 
 	tls_device_attach(ctx, sk, netdev);
 	goto release_netdev;