author		Atul Gupta <atul.gupta@chelsio.com>	2018-03-31 12:11:58 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-31 23:37:32 -0400
commit		a089439478734a6a0aa2eabbc03113e0c34db282
tree		21a0ba879c7dfa2a6dde1012fcfc12f33656e680
parent		a6779341a173aa8cedb5985e0c21c5d7c94c270a
crypto: chtls - Register chtls with net tls
Register chtls as an Inline TLS driver; chtls is a ULD to cxgb4.
setsockopt() programs the (tx/rx) keys on the chip.
Supports AES-GCM with 128-bit keys.
Supports both Inline Rx and Tx.
Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Reviewed-by: Casey Leedom <leedom@chelsio.com>
Reviewed-by: Michael Werner <werner@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/crypto/chelsio/chtls/chtls_main.c	575
1 file changed, 575 insertions, 0 deletions
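
For context, the setsockopt() path this patch hooks is the standard kernel TLS one: an application attaches the "tls" upper-layer protocol to a connected TCP socket, then passes its negotiated key material at SOL_TLS level, which the chtls_setsockopt() below intercepts for offloaded sockets. A minimal userspace sketch of that sequence follows (illustrative only, not part of this patch; the helper name and placeholder key buffers are invented for the example):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282		/* not present in older libc headers */
#endif
#ifndef TCP_ULP
#define TCP_ULP 31		/* not present in older libc headers */
#endif

/* Hand the negotiated AES-GCM-128 session key to the kernel for the
 * transmit direction; with Inline TLS hardware this is what ends up
 * programming the key on the chip.
 */
static int enable_ktls_tx(int sock, const unsigned char key[16],
			  const unsigned char iv[8],
			  const unsigned char salt[4],
			  const unsigned char rec_seq[8])
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	/* Attach the TLS upper-layer protocol to the connected socket. */
	if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* Routed to do_chtls_setsockopt() when the socket is offloaded. */
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}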
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
new file mode 100644
index 000000000000..04b316f86ebd
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -0,0 +1,575 @@
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
static DEFINE_MUTEX(cdev_list_lock);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_dev *cdev;
	struct sock *sk;
	int ret;

	sk = data;
	ret = NOTIFY_DONE;

	switch (event) {
	case CHTLS_LISTEN_START:
	case CHTLS_LISTEN_STOP:
		mutex_lock(&cdev_list_lock);
		list_for_each_entry(cdev, &cdev_list, list) {
			if (event == CHTLS_LISTEN_START)
				ret = chtls_listen_start(cdev, sk);
			else
				chtls_listen_stop(cdev, sk);
		}
		mutex_unlock(&cdev_list_lock);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};

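/*
 * Backlog receive for offloaded listeners: an skb whose transport header
 * offset differs from its network header offset is an ordinary TCP segment
 * and is passed to tcp_v4_do_rcv(); one where the two coincide is a CPL
 * message, dispatched through the handler stashed in its control block.
 */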
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

static int chtls_start_listen(struct sock *sk)
{
	int err;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	mutex_lock(&notify_mutex);
	err = raw_notifier_call_chain(&listen_notify_list,
				      CHTLS_LISTEN_START, sk);
	mutex_unlock(&notify_mutex);
	return err;
}

static void chtls_stop_listen(struct sock *sk)
{
	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, sk);
	mutex_unlock(&notify_mutex);
}

static int chtls_inline_feature(struct tls_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(sk);
}

static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tls_register_device(&cdev->tlsdev);
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
{
	tls_unregister_device(&cdev->tlsdev);
}

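/*
 * Work-queue handler for skbs whose CPL processing was deferred.  The
 * queue lock is dropped around each handler call so the handlers, which
 * run in process context here, are free to sleep or take other locks.
 */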
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
					      struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}

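/*
 * Adapter probe callback from cxgb4: allocate the per-adapter chtls_dev,
 * take a private copy of the lower-level driver info, pre-allocate one skb
 * per response-queue hash bin for CPL replies, set up hardware TLS key
 * memory when the adapter provides it, and add the device to cdev_list.
 */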
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev) + info->nports *
		       (sizeof(struct net_device *)), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->tids = lldi->tids;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j <= i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	chtls_unregister_dev(cdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	if (cdev->askb)
		kfree_skb(cdev->askb);
	kfree(cdev);
}

static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
		chtls_free_uld(cdev);
	mutex_unlock(&cdev_mutex);
}

static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		chtls_free_uld(cdev);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for the cpl_pass_accept_req which will be synthesized
	 * by the driver. Once the driver synthesizes the cpl_pass_accept_req,
	 * the skb will go through the regular cpl_pass_accept_req processing
	 * in TOM.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);
	/* For now we will copy cpl_rx_pkt in the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}

static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

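	/*
	 * Try to reuse the cached skb for this response-queue bin: take a
	 * reference, and if the count is then exactly 2 (the cache's plus
	 * ours) nobody else holds it, so it can be trimmed and refilled in
	 * place instead of allocating a fresh skb.
	 */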
	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}

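/*
 * Receive dispatch for the ULD: CPL_RX_PKT carries an ingress packet (an
 * incoming connection request, per copy_gl_to_skb_pkt() above) and gets a
 * synthesized cpl_pass_accept_req; a response without a gather list is a
 * small CPL reply copied directly out of the response queue; anything else
 * is converted from the gather list and fed to the CPL handlers.
 */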
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}

static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info;

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}

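/*
 * SOL_TLS setsockopt: validate the tls_crypto_info supplied by userspace
 * (TLS 1.2 with AES-GCM-128 only), stash it in the offloaded socket, and
 * program the key through chtls_setkey(); optname carries the direction.
 */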
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (!optval || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		rc = copy_from_user(crypto_info, optval,
				    sizeof(struct
					   tls12_crypto_info_aes_gcm_128));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		rc = chtls_setkey(csk, keylen, optname);
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

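/*
 * Offloaded sockets have their proto ops swapped for chtls_cpl_prot, a
 * copy of tcp_prot with close/disconnect/destroy/shutdown and the TLS
 * {get,set}sockopt entry points routed through chtls.
 */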
void chtls_install_cpl_ops(struct sock *sk)
{
	sk->sk_prot = &chtls_cpl_prot;
}

static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);