Diffstat (limited to 'include/net/tls.h')
 include/net/tls.h | 86
 1 file changed, 70 insertions(+), 16 deletions(-)
diff --git a/include/net/tls.h b/include/net/tls.h
index 70c273777fe9..d5c683e8bb22 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,6 +83,16 @@ struct tls_device {
 	void (*unhash)(struct tls_device *device, struct sock *sk);
 };
 
+enum {
+	TLS_BASE,
+	TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+	TLS_HW,
+#endif
+	TLS_HW_RECORD,
+	TLS_NUM_CONFIG,
+};
+
 struct tls_sw_context_tx {
 	struct crypto_aead *aead_send;
 	struct crypto_wait async_wait;
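
The new enum names the per-direction TLS configurations (plain TCP, kernel software crypto, NIC crypto offload, and full record offload in the device), with TLS_NUM_CONFIG as the array bound. A rough sketch of how a (tx_conf, rx_conf) pair can select the socket's protocol ops, assuming a per-configuration table like the one kept in net/tls/tls_main.c (the names below are illustrative, not the upstream symbols):

#include <net/sock.h>
#include <net/tls.h>

/* Illustrative only: one struct proto variant per TX/RX configuration
 * pair, selected whenever either direction changes configuration.
 */
static struct proto tls_protos[TLS_NUM_CONFIG][TLS_NUM_CONFIG];

static void tls_update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	/* tx_conf/rx_conf each hold TLS_BASE, TLS_SW, TLS_HW or
	 * TLS_HW_RECORD for their direction.
	 */
	sk->sk_prot = &tls_protos[ctx->tx_conf][ctx->rx_conf];
}
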
@@ -114,10 +124,6 @@ struct tls_sw_context_rx {
 	struct sk_buff *recv_pkt;
 	u8 control;
 	bool decrypted;
-
-	char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
-	char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
-
 };
 
 struct tls_record_info {
@@ -128,7 +134,7 @@ struct tls_record_info {
 	skb_frag_t frags[MAX_SKB_FRAGS];
 };
 
-struct tls_offload_context {
+struct tls_offload_context_tx {
 	struct crypto_aead *aead_send;
 	spinlock_t lock;	/* protects records list */
 	struct list_head records_list;
@@ -147,8 +153,8 @@ struct tls_offload_context {
 #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
 };
 
-#define TLS_OFFLOAD_CONTEXT_SIZE \
-	(ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
+	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
 	 TLS_DRIVER_STATE_SIZE)
 
 enum {
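
TLS_OFFLOAD_CONTEXT_SIZE_TX sizes a single allocation that holds the aligned core TX offload context plus TLS_DRIVER_STATE_SIZE bytes backing the flexible driver_state[] member, mirroring the RX macro added further down. A minimal sketch of the allocation pattern (illustrative, not a copy of tls_set_device_offload(); the helper name is made up):

#include <linux/slab.h>
#include <net/tls.h>

static struct tls_offload_context_tx *tls_alloc_offload_ctx_tx(void)
{
	/* one allocation: core context + room for driver-private state
	 * reached through driver_state[]
	 */
	return kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
}
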
@@ -197,6 +203,7 @@ struct tls_context {
 	int  (*push_pending_record)(struct sock *sk, int flags);
 
 	void (*sk_write_space)(struct sock *sk);
+	void (*sk_destruct)(struct sock *sk);
 	void (*sk_proto_close)(struct sock *sk, long timeout);
 
 	int  (*setsockopt)(struct sock *sk, int level,
@@ -209,13 +216,27 @@ struct tls_context {
 	void (*unhash)(struct sock *sk);
 };
 
+struct tls_offload_context_rx {
+	/* sw must be the first member of tls_offload_context_rx */
+	struct tls_sw_context_rx sw;
+	atomic64_t resync_req;
+	u8 driver_state[];
+	/* The TLS layer reserves room for driver specific state
+	 * Currently the belief is that there is not enough
+	 * driver specific state to justify another layer of indirection
+	 */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
+	(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+	 TLS_DRIVER_STATE_SIZE)
+
 int wait_on_pending_writer(struct sock *sk, long *timeo);
 int tls_sk_query(struct sock *sk, int optname, char __user *optval,
 		int __user *optlen);
 int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
 		  unsigned int optlen);
 
-
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
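
struct tls_offload_context_rx wraps the software RX context so that the same priv_ctx_rx pointer serves both paths: tls_sw_ctx_rx() keeps working because sw is the first member, while the device code layers resync_req and driver_state[] behind it. A sketch of what that first-member rule permits (a hypothetical helper, not part of the header):

#include <linux/kernel.h>
#include <net/tls.h>

/* Valid only because sw is guaranteed to be the first member of
 * struct tls_offload_context_rx.
 */
static struct tls_offload_context_rx *
rx_offload_ctx_from_sw(struct tls_sw_context_rx *sw_ctx)
{
	return container_of(sw_ctx, struct tls_offload_context_rx, sw);
}
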
@@ -223,6 +244,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 void tls_sw_close(struct sock *sk, long timeout);
 void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		   int nonblock, int flags, int *addr_len);
 unsigned int tls_sw_poll(struct file *file, struct socket *sock,
@@ -239,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 				       u32 seq, u64 *p_record_sn);
 
 static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -289,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 	return tls_ctx->pending_open_record_frags;
 }
 
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+		      struct sk_buff *skb);
+
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
-	return sk_fullsock(sk) &&
-	       /* matches smp_store_release in tls_set_device_offload */
-	       smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+	return sk_fullsock(sk) &
+	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+	       &tls_validate_xmit_skb);
+#else
+	return false;
+#endif
 }
 
 static inline void tls_err_abort(struct sock *sk, int err)
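
With CONFIG_SOCK_VALIDATE_XMIT, a socket now advertises TX device offload through its sk_validate_xmit_skb hook rather than through sk_destruct, so tls_is_sk_tx_device_offloaded() simply checks that the hook points at tls_validate_xmit_skb. A hypothetical driver transmit path using the check (the function name below is made up; the (void) cast only marks the unused variable in this sketch):

#include <linux/netdevice.h>
#include <net/tls.h>

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* Only consult TLS offload state for sockets that actually had the
	 * TLS validate hook installed.
	 */
	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) {
		struct tls_offload_context_tx *tx_ctx =
			tls_offload_ctx_tx(tls_get_ctx(skb->sk));

		/* ... use tx_ctx and tls_get_record() to locate the record
		 * covering this skb and attach crypto metadata ...
		 */
		(void)tx_ctx;
	}

	return NETDEV_TX_OK;
}
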
@@ -380,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
 	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
 }
 
-static inline struct tls_offload_context *tls_offload_ctx(
-	const struct tls_context *tls_ctx)
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
+{
+	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
 {
-	return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
+	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
 }
 
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
+
 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
 		      unsigned char *record_type);
 void tls_register_device(struct tls_device *device);
 void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+		struct scatterlist *sgout);
 
 struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
 				      struct net_device *dev,
 				      struct sk_buff *skb);
 
 int tls_sw_fallback_init(struct sock *sk,
-			 struct tls_offload_context *offload_ctx,
+			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info);
 
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
 #endif /* _TLS_OFFLOAD_H */
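
tls_offload_rx_resync_request() packs a resync request into a single atomic64: the sequence number passed in goes in the upper 32 bits and bit 0 marks the request as pending, so the RX path can pick it up without additional locking. A sketch of how a consumer might decode it (an assumption based on that encoding, not a copy of handle_device_resync(); the helper name is made up):

#include <linux/atomic.h>
#include <net/tls.h>

static bool rx_resync_pending(struct tls_offload_context_rx *rx_ctx,
			      u32 *resync_seq)
{
	u64 req = atomic64_read(&rx_ctx->resync_req);

	if (!(req & 1))
		return false;			/* no request outstanding */

	/* upper 32 bits hold the value exactly as it was handed to
	 * tls_offload_rx_resync_request()
	 */
	*resync_seq = req >> 32;
	atomic64_set(&rx_ctx->resync_req, 0);	/* consume the request */
	return true;
}
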