Diffstat (limited to 'include/net/tls.h')
-rw-r--r--	include/net/tls.h	86
1 files changed, 70 insertions, 16 deletions
diff --git a/include/net/tls.h b/include/net/tls.h
index 70c273777fe9..d5c683e8bb22 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,6 +83,16 @@ struct tls_device {
 	void (*unhash)(struct tls_device *device, struct sock *sk);
 };

+enum {
+	TLS_BASE,
+	TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+	TLS_HW,
+#endif
+	TLS_HW_RECORD,
+	TLS_NUM_CONFIG,
+};
+
 struct tls_sw_context_tx {
 	struct crypto_aead *aead_send;
 	struct crypto_wait async_wait;
@@ -114,10 +124,6 @@ struct tls_sw_context_rx {
 	struct sk_buff *recv_pkt;
 	u8 control;
 	bool decrypted;
-
-	char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
-	char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
-
 };

 struct tls_record_info {
@@ -128,7 +134,7 @@ struct tls_record_info {
 	skb_frag_t frags[MAX_SKB_FRAGS];
 };

-struct tls_offload_context {
+struct tls_offload_context_tx {
 	struct crypto_aead *aead_send;
 	spinlock_t lock; /* protects records list */
 	struct list_head records_list;
@@ -147,8 +153,8 @@ struct tls_offload_context {
 #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
 };

-#define TLS_OFFLOAD_CONTEXT_SIZE \
-	(ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
+	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
 	 TLS_DRIVER_STATE_SIZE)

 enum {
@@ -197,6 +203,7 @@ struct tls_context {
 	int (*push_pending_record)(struct sock *sk, int flags);

 	void (*sk_write_space)(struct sock *sk);
+	void (*sk_destruct)(struct sock *sk);
 	void (*sk_proto_close)(struct sock *sk, long timeout);

 	int (*setsockopt)(struct sock *sk, int level,
@@ -209,13 +216,27 @@ struct tls_context {
 	void (*unhash)(struct sock *sk);
 };

+struct tls_offload_context_rx {
+	/* sw must be the first member of tls_offload_context_rx */
+	struct tls_sw_context_rx sw;
+	atomic64_t resync_req;
+	u8 driver_state[];
+	/* The TLS layer reserves room for driver specific state
+	 * Currently the belief is that there is not enough
+	 * driver specific state to justify another layer of indirection
+	 */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
+	(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+	 TLS_DRIVER_STATE_SIZE)
+
 int wait_on_pending_writer(struct sock *sk, long *timeo);
 int tls_sk_query(struct sock *sk, int optname, char __user *optval,
 		 int __user *optlen);
 int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
 		  unsigned int optlen);

-
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -223,6 +244,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 void tls_sw_close(struct sock *sk, long timeout);
 void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		   int nonblock, int flags, int *addr_len);
 unsigned int tls_sw_poll(struct file *file, struct socket *sock,
@@ -239,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);

-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 				       u32 seq, u64 *p_record_sn);

 static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -289,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 	return tls_ctx->pending_open_record_frags;
 }

+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+		      struct sk_buff *skb);
+
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
-	return sk_fullsock(sk) &&
-	       /* matches smp_store_release in tls_set_device_offload */
-	       smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+	return sk_fullsock(sk) &
+	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+	       &tls_validate_xmit_skb);
+#else
+	return false;
+#endif
 }

 static inline void tls_err_abort(struct sock *sk, int err)
@@ -380,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
 	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
 }

-static inline struct tls_offload_context *tls_offload_ctx(
-		const struct tls_context *tls_ctx)
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
+{
+	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
 {
-	return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
+	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
 }

+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
+
 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
 		      unsigned char *record_type);
 void tls_register_device(struct tls_device *device);
 void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+		struct scatterlist *sgout);

 struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
 				      struct net_device *dev,
 				      struct sk_buff *skb);

 int tls_sw_fallback_init(struct sock *sk,
-			 struct tls_offload_context *offload_ctx,
+			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info);

+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
 #endif /* _TLS_OFFLOAD_H */
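
Note on driver_state[]: like the existing TX context, the new RX offload context is allocated with TLS_DRIVER_STATE_SIZE bytes of trailing room (see TLS_OFFLOAD_CONTEXT_SIZE_RX) so a NIC driver can keep a small per-connection handle without a second allocation or another layer of indirection. Below is a minimal sketch of how a driver might use that space; struct mydrv_rx_state and mydrv_rx_priv() are hypothetical names, not part of this patch.

	/* Illustrative driver-side helper, assuming the driver keeps only a
	 * small HW handle per offloaded RX connection.  The state must fit
	 * in the TLS_DRIVER_STATE_SIZE bytes reserved after
	 * struct tls_offload_context_rx.
	 */
	#include <net/tls.h>

	struct mydrv_rx_state {
		u32 hw_rx_context_id;	/* assumed HW handle, for illustration */
	};

	static inline struct mydrv_rx_state *
	mydrv_rx_priv(struct tls_offload_context_rx *rx_ctx)
	{
		BUILD_BUG_ON(sizeof(struct mydrv_rx_state) > TLS_DRIVER_STATE_SIZE);
		return (struct mydrv_rx_state *)rx_ctx->driver_state;
	}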
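
The TX-offload check now keys off sk->sk_validate_xmit_skb instead of sk->sk_destruct: when device TX offload is configured, the TLS core installs tls_validate_xmit_skb() as the socket's validate callback, so tls_is_sk_tx_device_offloaded() reduces to a pointer comparison under smp_load_acquire(). A hedged sketch of a driver xmit path using the helper follows; mydrv_xmit_tls() and mydrv_xmit_plain() are made-up names standing in for a real driver's datapath.

	#include <linux/netdevice.h>
	#include <net/tls.h>

	/* Hypothetical datapath helpers, for illustration only. */
	static netdev_tx_t mydrv_xmit_tls(struct sk_buff *skb, struct net_device *dev);
	static netdev_tx_t mydrv_xmit_plain(struct sk_buff *skb, struct net_device *dev);

	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* True only if the socket's sk_validate_xmit_skb callback is
		 * tls_validate_xmit_skb, i.e. TX crypto offload was set up.
		 */
		if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
			return mydrv_xmit_tls(skb, dev);

		return mydrv_xmit_plain(skb, dev);
	}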
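
tls_offload_rx_resync_request() records a device resync request by packing the TCP sequence number into the upper 32 bits of resync_req and setting bit 0 as a pending flag; handle_device_resync(), declared above, is the stack-side hook that later acts on the request. The sketch below shows the driver side of that handshake and how the packed value decodes; mydrv_rx_resync_hint() and example_resync_pending() are illustrative only and are not the in-tree consumer.

	#include <net/tls.h>

	/* Illustrative only: a driver that re-finds a TLS record header at TCP
	 * sequence number 'hdr_seq' asks the stack to resync there.  The helper
	 * stores (((u64)hdr_seq) << 32) | 1 in rx_ctx->resync_req: sequence
	 * number in bits 63..32, bit 0 = request pending.
	 */
	static void mydrv_rx_resync_hint(struct sock *sk, __be32 hdr_seq)
	{
		tls_offload_rx_resync_request(sk, hdr_seq);
	}

	/* Decoding the same packed value, for reference. */
	static inline bool example_resync_pending(u64 resync_req, u32 *seq_out)
	{
		*seq_out = resync_req >> 32;	/* network byte order, as stored */
		return resync_req & 1;
	}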
