Diffstat (limited to 'net/tls/tls_main.c')
-rw-r--r--  net/tls/tls_main.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 7e05af75536d..17e8667917aa 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -212,7 +212,6 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
 
 	/* If in_tcp_sendpages call lower protocol write space handler
 	 * to ensure we wake up any waiting operations there. For example
@@ -223,14 +222,12 @@ static void tls_write_space(struct sock *sk)
 		return;
 	}
 
-	/* Schedule the transmission if tx list is ready */
-	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
-		/* Schedule the transmission */
-		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
-			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
-	}
-
-	ctx->sk_write_space(sk);
+#ifdef CONFIG_TLS_DEVICE
+	if (ctx->tx_conf == TLS_HW)
+		tls_device_write_space(sk, ctx);
+	else
+#endif
+		tls_sw_write_space(sk, ctx);
 }
 
 static void tls_ctx_free(struct tls_context *ctx)
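The tx-scheduling logic dropped from tls_write_space() is not lost; the new call above implies it moves into a software-path helper. A minimal sketch follows, assuming the removed lines relocate essentially verbatim into tls_sw_write_space() in net/tls/tls_sw.c (that file is outside this diff, so the body below is an assumption reconstructed from the lines removed here, not code copied from the patch):

/* Sketch only: assumed body of the SW-path helper called above.
 * Built from the lines removed from tls_write_space() in this diff;
 * the actual definition lives in net/tls/tls_sw.c, which is not shown.
 */
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if the tx list is ready and no
	 * writer is already pending on the socket.
	 */
	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
	}

	/* Fall through to the socket's original write space callback */
	ctx->sk_write_space(sk);
}

The net effect of the patch is that tls_write_space() no longer assumes a software tx context: when CONFIG_TLS_DEVICE is enabled and the connection offloads TX to hardware (TLS_HW), the device-specific handler runs instead of the software scheduling path.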