author		Bart Van Assche <bvanassche@acm.org>	2013-10-26 08:40:37 -0400
committer	Roland Dreier <roland@purestorage.com>	2013-11-08 17:43:17 -0500
commit		4d73f95f708ce540a85113b00f5363d0593d871d
tree		ffd6af3d00a9329a07a41abed801ccedef1285af /drivers/infiniband/ulp/srp
parent		b81d00bddfdccf000f29a5ba5bf7caa8f26fa00b
IB/srp: Make queue size configurable
Certain storage configurations, e.g. a sufficiently large array of hard
disks in a RAID configuration, need a queue depth above 64 to achieve
optimal performance. Hence make the queue depth configurable.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: David Dillow <dillowda@ornl.gov>
Tested-by: Jack Wang <xjtuwjp@gmail.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
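For reference, the new option rides on the driver's existing add_target
sysfs interface alongside the usual login parameters. A minimal sketch,
assuming an mlx4 HCA on port 1; the id_ext, ioc_guid, dgid and service_id
values below are placeholders for a real target's identifiers, not real
values:

# queue_size=128 raises the SCSI host's can_queue above the default of 62;
# values below 1 are rejected by srp_parse_options().
echo "id_ext=200100e08b000000,ioc_guid=00117500000000e1,\
dgid=fe800000000000000002c90300000fe5,pkey=ffff,\
service_id=200100e08b000000,queue_size=128" \
    > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target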
Diffstat (limited to 'drivers/infiniband/ulp/srp')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 125
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h |  17
2 files changed, 103 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 735af11d9b82..6aa660d188a3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -299,16 +299,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
 		return -ENOMEM;
 
 	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
-			       srp_recv_completion, NULL, target, SRP_RQ_SIZE,
-			       target->comp_vector);
+			       srp_recv_completion, NULL, target,
+			       target->queue_size, target->comp_vector);
 	if (IS_ERR(recv_cq)) {
 		ret = PTR_ERR(recv_cq);
 		goto err;
 	}
 
 	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
-			       srp_send_completion, NULL, target, SRP_SQ_SIZE,
-			       target->comp_vector);
+			       srp_send_completion, NULL, target,
+			       target->queue_size, target->comp_vector);
 	if (IS_ERR(send_cq)) {
 		ret = PTR_ERR(send_cq);
 		goto err_recv_cq;
@@ -317,8 +317,8 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
 
 	init_attr->event_handler  = srp_qp_event;
-	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
-	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
+	init_attr->cap.max_send_wr     = target->queue_size;
+	init_attr->cap.max_recv_wr     = target->queue_size;
 	init_attr->cap.max_recv_sge    = 1;
 	init_attr->cap.max_send_sge    = 1;
 	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
@@ -364,6 +364,10 @@ err:
 	return ret;
 }
 
+/*
+ * Note: this function may be called without srp_alloc_iu_bufs() having been
+ * invoked. Hence the target->[rt]x_ring checks.
+ */
 static void srp_free_target_ib(struct srp_target_port *target)
 {
 	int i;
@@ -375,10 +379,18 @@ static void srp_free_target_ib(struct srp_target_port *target)
 	target->qp = NULL;
 	target->send_cq = target->recv_cq = NULL;
 
-	for (i = 0; i < SRP_RQ_SIZE; ++i)
-		srp_free_iu(target->srp_host, target->rx_ring[i]);
-	for (i = 0; i < SRP_SQ_SIZE; ++i)
-		srp_free_iu(target->srp_host, target->tx_ring[i]);
+	if (target->rx_ring) {
+		for (i = 0; i < target->queue_size; ++i)
+			srp_free_iu(target->srp_host, target->rx_ring[i]);
+		kfree(target->rx_ring);
+		target->rx_ring = NULL;
+	}
+	if (target->tx_ring) {
+		for (i = 0; i < target->queue_size; ++i)
+			srp_free_iu(target->srp_host, target->tx_ring[i]);
+		kfree(target->tx_ring);
+		target->tx_ring = NULL;
+	}
 }
 
 static void srp_path_rec_completion(int status,
@@ -564,7 +576,11 @@ static void srp_free_req_data(struct srp_target_port *target)
 	struct srp_request *req;
 	int i;
 
-	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
+	if (!target->req_ring)
+		return;
+
+	for (i = 0; i < target->req_ring_size; ++i) {
+		req = &target->req_ring[i];
 		kfree(req->fmr_list);
 		kfree(req->map_page);
 		if (req->indirect_dma_addr) {
@@ -574,6 +590,9 @@ static void srp_free_req_data(struct srp_target_port *target)
 		}
 		kfree(req->indirect_desc);
 	}
+
+	kfree(target->req_ring);
+	target->req_ring = NULL;
 }
 
 static int srp_alloc_req_data(struct srp_target_port *target)
@@ -586,7 +605,12 @@ static int srp_alloc_req_data(struct srp_target_port *target)
 
 	INIT_LIST_HEAD(&target->free_reqs);
 
-	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+	target->req_ring = kzalloc(target->req_ring_size *
+				   sizeof(*target->req_ring), GFP_KERNEL);
+	if (!target->req_ring)
+		goto out;
+
+	for (i = 0; i < target->req_ring_size; ++i) {
 		req = &target->req_ring[i];
 		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
 					GFP_KERNEL);
@@ -810,7 +834,7 @@ static void srp_terminate_io(struct srp_rport *rport)
 	struct srp_target_port *target = rport->lld_data;
 	int i;
 
-	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+	for (i = 0; i < target->req_ring_size; ++i) {
 		struct srp_request *req = &target->req_ring[i];
 		srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
 	}
@@ -847,13 +871,13 @@ static int srp_rport_reconnect(struct srp_rport *rport)
 	else
 		srp_create_target_ib(target);
 
-	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+	for (i = 0; i < target->req_ring_size; ++i) {
 		struct srp_request *req = &target->req_ring[i];
 		srp_finish_req(target, req, DID_RESET << 16);
 	}
 
 	INIT_LIST_HEAD(&target->free_tx);
-	for (i = 0; i < SRP_SQ_SIZE; ++i)
+	for (i = 0; i < target->queue_size; ++i)
 		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
 	if (ret == 0)
@@ -1562,11 +1586,24 @@ err_unlock:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
+/*
+ * Note: the resources allocated in this function are freed in
+ * srp_free_target_ib().
+ */
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
 
-	for (i = 0; i < SRP_RQ_SIZE; ++i) {
+	target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
+				  GFP_KERNEL);
+	if (!target->rx_ring)
+		goto err_no_ring;
+	target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
+				  GFP_KERNEL);
+	if (!target->tx_ring)
+		goto err_no_ring;
+
+	for (i = 0; i < target->queue_size; ++i) {
 		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
 						  target->max_ti_iu_len,
 						  GFP_KERNEL, DMA_FROM_DEVICE);
@@ -1574,7 +1611,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 			goto err;
 	}
 
-	for (i = 0; i < SRP_SQ_SIZE; ++i) {
+	for (i = 0; i < target->queue_size; ++i) {
 		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
 						  target->max_iu_len,
 						  GFP_KERNEL, DMA_TO_DEVICE);
@@ -1587,16 +1624,18 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 	return 0;
 
 err:
-	for (i = 0; i < SRP_RQ_SIZE; ++i) {
+	for (i = 0; i < target->queue_size; ++i) {
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
-		target->rx_ring[i] = NULL;
-	}
-
-	for (i = 0; i < SRP_SQ_SIZE; ++i) {
 		srp_free_iu(target->srp_host, target->tx_ring[i]);
-		target->tx_ring[i] = NULL;
 	}
 
+
+err_no_ring:
+	kfree(target->tx_ring);
+	target->tx_ring = NULL;
+	kfree(target->rx_ring);
+	target->rx_ring = NULL;
+
 	return -ENOMEM;
 }
 
@@ -1647,6 +1686,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 		target->scsi_host->can_queue
 			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
 			      target->scsi_host->can_queue);
+		target->scsi_host->cmd_per_lun
+			= min_t(int, target->scsi_host->can_queue,
+				target->scsi_host->cmd_per_lun);
 	} else {
 		shost_printk(KERN_WARNING, target->scsi_host,
 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
@@ -1654,7 +1696,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 		goto error;
 	}
 
-	if (!target->rx_ring[0]) {
+	if (!target->rx_ring) {
 		ret = srp_alloc_iu_bufs(target);
 		if (ret)
 			goto error;
@@ -1674,7 +1716,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 	if (ret)
 		goto error_free;
 
-	for (i = 0; i < SRP_RQ_SIZE; i++) {
+	for (i = 0; i < target->queue_size; i++) {
 		struct srp_iu *iu = target->rx_ring[i];
 		ret = srp_post_recv(target, iu);
 		if (ret)
@@ -1933,7 +1975,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 	if (target->tsk_mgmt_status)
 		return FAILED;
 
-	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+	for (i = 0; i < target->req_ring_size; ++i) {
 		struct srp_request *req = &target->req_ring[i];
 		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_finish_req(target, req, DID_RESET << 16);
@@ -2136,9 +2178,9 @@ static struct scsi_host_template srp_template = {
 	.eh_host_reset_handler		= srp_reset_host,
 	.skip_settle_delay		= true,
 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
-	.can_queue			= SRP_CMD_SQ_SIZE,
+	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.this_id			= -1,
-	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
+	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= srp_host_attrs
 };
@@ -2245,6 +2287,7 @@ enum {
 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
 	SRP_OPT_COMP_VECTOR	= 1 << 12,
 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
+	SRP_OPT_QUEUE_SIZE	= 1 << 14,
 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
 				   SRP_OPT_IOC_GUID	|
 				   SRP_OPT_DGID		|
@@ -2267,6 +2310,7 @@ static const match_table_t srp_opt_tokens = {
 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
+	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
 	{ SRP_OPT_ERR,			NULL			}
 };
 
@@ -2361,13 +2405,25 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 			target->scsi_host->max_sectors = token;
 			break;
 
+		case SRP_OPT_QUEUE_SIZE:
+			if (match_int(args, &token) || token < 1) {
+				pr_warn("bad queue_size parameter '%s'\n", p);
+				goto out;
+			}
+			target->scsi_host->can_queue = token;
+			target->queue_size = token + SRP_RSP_SQ_SIZE +
+					     SRP_TSK_MGMT_SQ_SIZE;
+			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+				target->scsi_host->cmd_per_lun = token;
+			break;
+
 		case SRP_OPT_MAX_CMD_PER_LUN:
-			if (match_int(args, &token)) {
+			if (match_int(args, &token) || token < 1) {
 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
 					p);
 				goto out;
 			}
-			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
+			target->scsi_host->cmd_per_lun = token;
 			break;
 
 		case SRP_OPT_IO_CLASS:
@@ -2455,6 +2511,12 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 		pr_warn("target creation request is missing parameter '%s'\n",
 			srp_opt_tokens[i].pattern);
 
+	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
+	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
+			target->scsi_host->cmd_per_lun,
+			target->scsi_host->can_queue);
+
 out:
 	kfree(options);
 	return ret;
@@ -2493,11 +2555,14 @@ static ssize_t srp_create_target(struct device *dev,
 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
 	target->allow_ext_sg	= allow_ext_sg;
 	target->tl_retry_count	= 7;
+	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
 
 	ret = srp_parse_options(buf, target);
 	if (ret)
 		goto err;
 
+	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
+
 	if (!srp_conn_unique(target->srp_host, target)) {
 		shost_printk(KERN_INFO, target->scsi_host,
 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 446b0452107b..575681063f38 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -57,14 +57,11 @@ enum {
 	SRP_MAX_LUN		= 512,
 	SRP_DEF_SG_TABLESIZE	= 12,
 
-	SRP_RQ_SHIFT		= 6,
-	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-
-	SRP_SQ_SIZE		= SRP_RQ_SIZE,
+	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
 	SRP_RSP_SQ_SIZE		= 1,
-	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE	= 1,
-	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
+	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
+				  SRP_TSK_MGMT_SQ_SIZE,
 
 	SRP_TAG_NO_REQ		= ~0U,
 	SRP_TAG_TSK_MGMT	= 1U << 31,
@@ -156,6 +153,8 @@ struct srp_target_port {
 	char			target_name[32];
 	unsigned int		scsi_id;
 	unsigned int		sg_tablesize;
+	int			queue_size;
+	int			req_ring_size;
 	int			comp_vector;
 	int			tl_retry_count;
 
@@ -173,9 +172,9 @@ struct srp_target_port {
 
 	int			zero_req_lim;
 
-	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
-	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
-	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
+	struct srp_iu	       **tx_ring;
+	struct srp_iu	       **rx_ring;
+	struct srp_request	*req_ring;
 
 	struct work_struct	tl_err_work;
 	struct work_struct	remove_work;
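
For orientation, the size bookkeeping introduced above works out as
follows: a login parameter queue_size=N sets scsi_host->can_queue = N and
target->queue_size = N + SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE = N + 2,
after which srp_create_target() derives target->req_ring_size =
target->queue_size - SRP_TSK_MGMT_SQ_SIZE. Without the parameter,
target->queue_size stays at SRP_DEFAULT_QUEUE_SIZE = 1 << 6 = 64, so
SRP_DEFAULT_CMD_SQ_SIZE = 64 - 1 - 1 = 62, which matches the value the
removed SRP_CMD_SQ_SIZE constant used to have.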