diff options
author | Anup Patel <anup.patel@broadcom.com> | 2017-08-22 05:56:53 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2017-08-28 07:14:24 -0400 |
commit | 10f1a33080258626e6ada567953d6a0bdadf5502 (patch) | |
tree | 9da89089f96a633b68a32455d1093ce725f1f988 | |
parent | 57a285085985c80ee0c7baa9938f28cf7e6d69fe (diff) |
dmaengine: bcm-sba-raid: Remove redundant next_count from sba_request
The next_count in sba_request is redundant because the same information
is captured by next_pending_count. This patch removes next_count
from sba_request.
Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r-- | drivers/dma/bcm-sba-raid.c | 6 |
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index 0f6467173986..d59a5fa2d202 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c | |||
@@ -111,7 +111,6 @@ struct sba_request { | |||
111 | /* Chained requests management */ | 111 | /* Chained requests management */ |
112 | struct sba_request *first; | 112 | struct sba_request *first; |
113 | struct list_head next; | 113 | struct list_head next; |
114 | unsigned int next_count; | ||
115 | atomic_t next_pending_count; | 114 | atomic_t next_pending_count; |
116 | /* BRCM message data */ | 115 | /* BRCM message data */ |
117 | void *resp; | 116 | void *resp; |
@@ -221,7 +220,6 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba) | |||
221 | req->flags = SBA_REQUEST_STATE_ALLOCED; | 220 | req->flags = SBA_REQUEST_STATE_ALLOCED; |
222 | req->first = req; | 221 | req->first = req; |
223 | INIT_LIST_HEAD(&req->next); | 222 | INIT_LIST_HEAD(&req->next); |
224 | req->next_count = 1; | ||
225 | atomic_set(&req->next_pending_count, 1); | 223 | atomic_set(&req->next_pending_count, 1); |
226 | 224 | ||
227 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); | 225 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); |
@@ -342,8 +340,7 @@ static void sba_chain_request(struct sba_request *first, | |||
342 | 340 | ||
343 | list_add_tail(&req->next, &first->next); | 341 | list_add_tail(&req->next, &first->next); |
344 | req->first = first; | 342 | req->first = first; |
345 | first->next_count++; | 343 | atomic_inc(&first->next_pending_count); |
346 | atomic_set(&first->next_pending_count, first->next_count); | ||
347 | 344 | ||
348 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | 345 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
349 | } | 346 | } |
@@ -1501,7 +1498,6 @@ static int sba_prealloc_channel_resources(struct sba_device *sba) | |||
1501 | req->sba = sba; | 1498 | req->sba = sba; |
1502 | req->flags = SBA_REQUEST_STATE_FREE; | 1499 | req->flags = SBA_REQUEST_STATE_FREE; |
1503 | INIT_LIST_HEAD(&req->next); | 1500 | INIT_LIST_HEAD(&req->next); |
1504 | req->next_count = 1; | ||
1505 | atomic_set(&req->next_pending_count, 0); | 1501 | atomic_set(&req->next_pending_count, 0); |
1506 | req->resp = sba->resp_base + p; | 1502 | req->resp = sba->resp_base + p; |
1507 | req->resp_dma = sba->resp_dma_base + p; | 1503 | req->resp_dma = sba->resp_dma_base + p; |