author     Anup Patel <anup.patel@broadcom.com>       2017-08-22 05:57:06 -0400
committer  Vinod Koul <vinod.koul@intel.com>          2017-08-28 07:14:24 -0400
commit     ecbf9ef15a891a1e716c1ea611cae9fa5ef37522 (patch)
tree       9212bf8ccdab9b5b23c061027a750e53698bea4d
parent     29e0f486d975fabbadb770f957adeb75d73ab2d0 (diff)
dmaengine: bcm-sba-raid: Remove redundant SBA_REQUEST_STATE_COMPLETED
The SBA_REQUEST_STATE_COMPLETED state was added to keep track of
sba_requests which had completed but could not be freed because the
underlying Async Tx descriptor was not yet ACKed by the DMA client.

Instead of the above, we can free an sba_request even when its Async
Tx descriptor is not ACKed, and have sba_alloc_request() ensure that
it only allocates sba_requests whose Async Tx descriptors are ACKed.

This alternate approach makes the SBA_REQUEST_STATE_COMPLETED state
redundant, hence this patch removes it.
Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Reviewed-by: Ray Jui <ray.jui@broadcom.com>
Reviewed-by: Scott Branden <scott.branden@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
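
[Editor's note] To make the new allocation policy concrete, below is a
minimal user-space model of what the reworked sba_alloc_request() does.
The names here (struct req, alloc_req) are hypothetical stand-ins; the
real driver walks sba->reqs_free_list under sba->reqs_lock with
list_for_each_entry() and tests each descriptor with async_tx_test_ack().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Models an sba_request; 'acked' stands in for DMA_CTRL_ACK on req->tx. */
struct req {
	bool acked;
	struct req *next;	/* models the reqs_free_list linkage */
};

/*
 * Like the reworked sba_alloc_request(): scan the free list and take the
 * first request whose descriptor the client has already ACKed. A request
 * freed with an un-ACKed descriptor simply stays on the free list, so no
 * separate "completed" list is needed.
 */
static struct req *alloc_req(struct req *free_list)
{
	for (struct req *r = free_list; r; r = r->next)
		if (r->acked)
			return r;
	return NULL;	/* nothing ACKed yet; the driver would peek mailbox channels */
}

int main(void)
{
	struct req b = { .acked = true,  .next = NULL };	/* freed and ACKed */
	struct req a = { .acked = false, .next = &b };		/* freed, not yet ACKed */

	/* Skips 'a' and picks 'b', just as sba_alloc_request() skips
	 * requests whose Async Tx descriptor is still un-ACKed. */
	printf("got request with acked=%d\n", alloc_req(&a)->acked);
	return 0;
}

The design trade-off: instead of sweeping a dedicated completed list on
every completion, an un-ACKed request costs nothing until the client
ACKs it, at which point it becomes allocatable in place.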
 drivers/dma/bcm-sba-raid.c | 63 +++++++++++++++++----------------------------------------------------------
 1 file changed, 17 insertions(+), 46 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index c3643d30e31b..6c2c44724637 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -99,8 +99,7 @@ enum sba_request_flags {
 	SBA_REQUEST_STATE_ALLOCED	= 0x002,
 	SBA_REQUEST_STATE_PENDING	= 0x004,
 	SBA_REQUEST_STATE_ACTIVE	= 0x008,
-	SBA_REQUEST_STATE_COMPLETED	= 0x010,
-	SBA_REQUEST_STATE_ABORTED	= 0x020,
+	SBA_REQUEST_STATE_ABORTED	= 0x010,
 	SBA_REQUEST_STATE_MASK		= 0x0ff,
 	SBA_REQUEST_FENCE		= 0x100,
 };
@@ -160,7 +159,6 @@ struct sba_device {
 	struct list_head reqs_alloc_list;
 	struct list_head reqs_pending_list;
 	struct list_head reqs_active_list;
-	struct list_head reqs_completed_list;
 	struct list_head reqs_aborted_list;
 	struct list_head reqs_free_list;
 	/* DebugFS directory entries */
@@ -212,17 +210,21 @@ static void sba_peek_mchans(struct sba_device *sba)
 
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
+	bool found = false;
 	unsigned long flags;
 	struct sba_request *req = NULL;
 
 	spin_lock_irqsave(&sba->reqs_lock, flags);
-	req = list_first_entry_or_null(&sba->reqs_free_list,
-				       struct sba_request, node);
-	if (req)
-		list_move_tail(&req->node, &sba->reqs_alloc_list);
+	list_for_each_entry(req, &sba->reqs_free_list, node) {
+		if (async_tx_test_ack(&req->tx)) {
+			list_move_tail(&req->node, &sba->reqs_alloc_list);
+			found = true;
+			break;
+		}
+	}
 	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 
-	if (!req) {
+	if (!found) {
 		/*
 		 * We have no more free requests so, we peek
 		 * mailbox channels hoping few active requests
@@ -297,18 +299,6 @@ static void _sba_free_request(struct sba_device *sba,
 		sba->reqs_fence = false;
 }
 
-/* Note: Must be called with sba->reqs_lock held */
-static void _sba_complete_request(struct sba_device *sba,
-				  struct sba_request *req)
-{
-	lockdep_assert_held(&sba->reqs_lock);
-	req->flags &= ~SBA_REQUEST_STATE_MASK;
-	req->flags |= SBA_REQUEST_STATE_COMPLETED;
-	list_move_tail(&req->node, &sba->reqs_completed_list);
-	if (list_empty(&sba->reqs_active_list))
-		sba->reqs_fence = false;
-}
-
 static void sba_free_chained_requests(struct sba_request *req)
 {
 	unsigned long flags;
@@ -350,10 +340,6 @@ static void sba_cleanup_nonpending_requests(struct sba_device *sba)
 	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
 		_sba_free_request(sba, req);
 
-	/* Freeup all completed request */
-	list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
-		_sba_free_request(sba, req);
-
 	/* Set all active requests as aborted */
 	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
 		_sba_abort_request(sba, req);
@@ -472,20 +458,8 @@ static void sba_process_received_request(struct sba_device *sba,
 			_sba_free_request(sba, nreq);
 		INIT_LIST_HEAD(&first->next);
 
-		/* The client is allowed to attach dependent operations
-		 * until 'ack' is set
-		 */
-		if (!async_tx_test_ack(tx))
-			_sba_complete_request(sba, first);
-		else
-			_sba_free_request(sba, first);
-
-		/* Cleanup completed requests */
-		list_for_each_entry_safe(req, nreq,
-					 &sba->reqs_completed_list, node) {
-			if (async_tx_test_ack(&req->tx))
-				_sba_free_request(sba, req);
-		}
+		/* Free the first request */
+		_sba_free_request(sba, first);
 
 		/* Process pending requests */
 		_sba_process_pending_requests(sba);
@@ -499,13 +473,14 @@ static void sba_write_stats_in_seqfile(struct sba_device *sba,
 {
 	unsigned long flags;
 	struct sba_request *req;
-	u32 free_count = 0, alloced_count = 0, pending_count = 0;
-	u32 active_count = 0, aborted_count = 0, completed_count = 0;
+	u32 free_count = 0, alloced_count = 0;
+	u32 pending_count = 0, active_count = 0, aborted_count = 0;
 
 	spin_lock_irqsave(&sba->reqs_lock, flags);
 
 	list_for_each_entry(req, &sba->reqs_free_list, node)
-		free_count++;
+		if (async_tx_test_ack(&req->tx))
+			free_count++;
 
 	list_for_each_entry(req, &sba->reqs_alloc_list, node)
 		alloced_count++;
@@ -519,9 +494,6 @@ static void sba_write_stats_in_seqfile(struct sba_device *sba,
 	list_for_each_entry(req, &sba->reqs_aborted_list, node)
 		aborted_count++;
 
-	list_for_each_entry(req, &sba->reqs_completed_list, node)
-		completed_count++;
-
 	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 
 	seq_printf(file, "maximum requests = %d\n", sba->max_req);
@@ -530,7 +502,6 @@ static void sba_write_stats_in_seqfile(struct sba_device *sba,
 	seq_printf(file, "pending requests = %d\n", pending_count);
 	seq_printf(file, "active requests = %d\n", active_count);
 	seq_printf(file, "aborted requests = %d\n", aborted_count);
-	seq_printf(file, "completed requests = %d\n", completed_count);
 }
 
 /* ====== DMAENGINE callbacks ===== */
@@ -1537,7 +1508,6 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
 	INIT_LIST_HEAD(&sba->reqs_alloc_list);
 	INIT_LIST_HEAD(&sba->reqs_pending_list);
 	INIT_LIST_HEAD(&sba->reqs_active_list);
-	INIT_LIST_HEAD(&sba->reqs_completed_list);
 	INIT_LIST_HEAD(&sba->reqs_aborted_list);
 	INIT_LIST_HEAD(&sba->reqs_free_list);
 
@@ -1565,6 +1535,7 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
 		}
 		memset(&req->msg, 0, sizeof(req->msg));
 		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+		async_tx_ack(&req->tx);
 		req->tx.tx_submit = sba_tx_submit;
 		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
 		list_add_tail(&req->node, &sba->reqs_free_list);
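
[Editor's note] On the final hunk: preallocated descriptors have never
been submitted, so without the added async_tx_ack() call they would start
out un-ACKed and the reworked sba_alloc_request() would never hand them
out. From the client's side, the interplay looks roughly like the sketch
below. This is generic dmaengine/async_tx usage for illustration, not
code from this patch; chan, dst, src, src_cnt and len are assumed to be
set up elsewhere.

	/* Prepare an XOR descriptor WITHOUT DMA_CTRL_ACK so dependent
	 * operations may still be attached after submission. */
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_xor(chan, dst, src, src_cnt, len, 0);
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* ... the client attaches dependent operations; even after the
	 * hardware completes and the driver frees the sba_request back
	 * onto reqs_free_list, sba_alloc_request() keeps skipping it ... */

	async_tx_ack(tx);	/* sets DMA_CTRL_ACK; the request is now reusable */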