author    Boris Pismenny <borisp@mellanox.com>    2019-02-27 10:38:04 -0500
committer David S. Miller <davem@davemloft.net>   2019-03-04 01:10:16 -0500
commit    7463d3a2db0efea3701aab5eeb310e0d8157aff7
tree      1fd12490ff9e33b93b9b503c89e2c9cb48c08082 /net/tls
parent    94850257cf0f88b20db7644f28bfedc7d284de15
tls: Fix write space handling
TLS device offload cannot use the SW context. This patch restores the
original tls_device write space handler and moves the SW- and
device-specific portions into the relevant files.

Also, remove the write_space call from the tls_sw flow; partial records
are already handled by its delayed tx work handler.
Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
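For orientation, the sketch below condenses the three hunks that follow into the resulting write space path. The identifiers are the ones introduced by the patch; the comments and the elided in_tcp_sendpages check are editorial, so treat this as a reading aid rather than the literal code.

/*
 * Condensed sketch of tls_write_space() after this patch (assembled from
 * the hunks below; comments are editorial).
 */
static void tls_write_space(struct sock *sk)		/* net/tls/tls_main.c */
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* ... in_tcp_sendpages early return elided, see the hunk below ... */

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		/* Device offload: retry the partially sent record under
		 * GFP_ATOMIC and wake the original handler on success.
		 */
		tls_device_write_space(sk, ctx);	/* net/tls/tls_device.c */
	else
#endif
		/* SW path: only schedule the delayed tx work; it takes care
		 * of partial records itself, so no sk_write_space call here.
		 */
		tls_sw_write_space(sk, ctx);		/* net/tls/tls_sw.c */
}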
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_device.c | 17
-rw-r--r--  net/tls/tls_main.c   | 15
-rw-r--r--  net/tls/tls_sw.c     | 13
3 files changed, 36 insertions, 9 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 3e5e8e021a87..4a1da837a733 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -546,6 +546,23 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
 	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+{
+	int rc = 0;
+
+	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
+		gfp_t sk_allocation = sk->sk_allocation;
+
+		sk->sk_allocation = GFP_ATOMIC;
+		rc = tls_push_partial_record(sk, ctx,
+					     MSG_DONTWAIT | MSG_NOSIGNAL);
+		sk->sk_allocation = sk_allocation;
+	}
+
+	if (!rc)
+		ctx->sk_write_space(sk);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 7e05af75536d..17e8667917aa 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -212,7 +212,6 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
 
 	/* If in_tcp_sendpages call lower protocol write space handler
 	 * to ensure we wake up any waiting operations there. For example
@@ -223,14 +222,12 @@ static void tls_write_space(struct sock *sk)
 		return;
 	}
 
-	/* Schedule the transmission if tx list is ready */
-	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
-		/* Schedule the transmission */
-		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
-			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
-	}
-
-	ctx->sk_write_space(sk);
+#ifdef CONFIG_TLS_DEVICE
+	if (ctx->tx_conf == TLS_HW)
+		tls_device_write_space(sk, ctx);
+	else
+#endif
+		tls_sw_write_space(sk, ctx);
 }
 
 static void tls_ctx_free(struct tls_context *ctx)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 1cc830582fa8..917caacd4d31 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2126,6 +2126,19 @@ static void tx_work_handler(struct work_struct *work)
 	release_sock(sk);
 }
 
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
+{
+	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
+
+	/* Schedule the transmission if tx list is ready */
+	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
+		/* Schedule the transmission */
+		if (!test_and_set_bit(BIT_TX_SCHEDULED,
+				      &tx_ctx->tx_bitmask))
+			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+	}
+}
+
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
