author		Linus Torvalds <torvalds@linux-foundation.org>	2017-09-07 17:03:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-07 17:03:05 -0400
commit		cd7b34fe1c2d93c54b368c295de88612c0b7120b (patch)
tree		63e7726e2b437c79a53d1b3528d4dc685d3f84e3 /drivers/dma/bcm-sba-raid.c
parent		75c727155ce1239c1417ba32a48c796de0d762d4 (diff)
parent		41bd0314fa3a458bee7ad768d079e681316332e7 (diff)
Merge tag 'dmaengine-4.14-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This one features the usual updates to the drivers and one good part
of removing DA_SG from core as it has no users.
Summary:
- Remove DMA_SG support as we have no users for this feature
- New driver for Altera / Intel mSGDMA IP core
- Support for memset in dmatest and qcom_hidma driver
- Updates for non-cyclic mode in k3dma, and a bunch of updates in
bam_dma and bcm sba-raid
- Constify device ids across drivers"
* tag 'dmaengine-4.14-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (52 commits)
dmaengine: sun6i: support V3s SoC variant
dmaengine: sun6i: make gate bit in sun8i's DMA engines a common quirk
dmaengine: rcar-dmac: document R8A77970 bindings
dmaengine: xilinx_dma: Fix error code format specifier
dmaengine: altera: Use macros instead of structs to describe the registers
dmaengine: ti-dma-crossbar: Fix dra7 reserve function
dmaengine: pl330: constify amba_id
dmaengine: pl08x: constify amba_id
dmaengine: bcm-sba-raid: Remove redundant SBA_REQUEST_STATE_COMPLETED
dmaengine: bcm-sba-raid: Explicitly ACK mailbox message after sending
dmaengine: bcm-sba-raid: Add debugfs support
dmaengine: bcm-sba-raid: Remove redundant SBA_REQUEST_STATE_RECEIVED
dmaengine: bcm-sba-raid: Re-factor sba_process_deferred_requests()
dmaengine: bcm-sba-raid: Pre-ack async tx descriptor
dmaengine: bcm-sba-raid: Peek mbox when we have no free requests
dmaengine: bcm-sba-raid: Alloc resources before registering DMA device
dmaengine: bcm-sba-raid: Improve sba_issue_pending() run duration
dmaengine: bcm-sba-raid: Increase number of free sba_request
dmaengine: bcm-sba-raid: Allow arbitrary number free sba_request
dmaengine: bcm-sba-raid: Remove reqs_free_count from sba_device
...
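The bcm sba-raid entries above revolve around one rework that is visible throughout the diff below: the driver's `enum sba_request_state` plus a separate `bool fence` collapse into a single `u32 flags` word, with a one-hot state in the low bits and a fence marker in a high bit, so a state transition can clear the state mask without disturbing the fence. A minimal stand-alone C sketch of that idiom, using the constants introduced by the diff (the `set_state()` helper is hypothetical, for illustration only; the driver does this inline under `reqs_lock`):

#include <stdint.h>

/* Flag values as introduced by the diff below */
enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,	/* all state bits */
	SBA_REQUEST_FENCE		= 0x100,	/* orthogonal to state */
};

/* Hypothetical helper: replace the one-hot state, preserve the fence bit */
static void set_state(uint32_t *flags, uint32_t state)
{
	*flags &= ~SBA_REQUEST_STATE_MASK;
	*flags |= state;
}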
Diffstat (limited to 'drivers/dma/bcm-sba-raid.c')
-rw-r--r--	drivers/dma/bcm-sba-raid.c	544
1 file changed, 295 insertions(+), 249 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index e41bbc7cb094..6c2c44724637 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -36,6 +36,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/list.h>
@@ -48,7 +49,8 @@
 
 #include "dmaengine.h"
 
-/* SBA command related defines */
+/* ====== Driver macros and defines ===== */
+
 #define SBA_TYPE_SHIFT		48
 #define SBA_TYPE_MASK		GENMASK(1, 0)
 #define SBA_TYPE_A		0x0
@@ -82,39 +84,40 @@
 #define SBA_CMD_WRITE_BUFFER	0xc
 #define SBA_CMD_GALOIS		0xe
 
+#define SBA_MAX_REQ_PER_MBOX_CHANNEL	8192
+
 /* Driver helper macros */
 #define to_sba_request(tx)	\
 	container_of(tx, struct sba_request, tx)
 #define to_sba_device(dchan)	\
 	container_of(dchan, struct sba_device, dma_chan)
 
-enum sba_request_state {
-	SBA_REQUEST_STATE_FREE = 1,
-	SBA_REQUEST_STATE_ALLOCED = 2,
-	SBA_REQUEST_STATE_PENDING = 3,
-	SBA_REQUEST_STATE_ACTIVE = 4,
-	SBA_REQUEST_STATE_RECEIVED = 5,
-	SBA_REQUEST_STATE_COMPLETED = 6,
-	SBA_REQUEST_STATE_ABORTED = 7,
+/* ===== Driver data structures ===== */
+
+enum sba_request_flags {
+	SBA_REQUEST_STATE_FREE		= 0x001,
+	SBA_REQUEST_STATE_ALLOCED	= 0x002,
+	SBA_REQUEST_STATE_PENDING	= 0x004,
+	SBA_REQUEST_STATE_ACTIVE	= 0x008,
+	SBA_REQUEST_STATE_ABORTED	= 0x010,
+	SBA_REQUEST_STATE_MASK		= 0x0ff,
+	SBA_REQUEST_FENCE		= 0x100,
 };
 
 struct sba_request {
 	/* Global state */
 	struct list_head node;
 	struct sba_device *sba;
-	enum sba_request_state state;
-	bool fence;
+	u32 flags;
 	/* Chained requests management */
 	struct sba_request *first;
 	struct list_head next;
-	unsigned int next_count;
 	atomic_t next_pending_count;
 	/* BRCM message data */
-	void *resp;
-	dma_addr_t resp_dma;
-	struct brcm_sba_command *cmds;
 	struct brcm_message msg;
 	struct dma_async_tx_descriptor tx;
+	/* SBA commands */
+	struct brcm_sba_command cmds[0];
 };
 
 enum sba_version {
@@ -152,19 +155,18 @@ struct sba_device {
 	void *cmds_base;
 	dma_addr_t cmds_dma_base;
 	spinlock_t reqs_lock;
-	struct sba_request *reqs;
 	bool reqs_fence;
 	struct list_head reqs_alloc_list;
 	struct list_head reqs_pending_list;
 	struct list_head reqs_active_list;
-	struct list_head reqs_received_list;
-	struct list_head reqs_completed_list;
 	struct list_head reqs_aborted_list;
 	struct list_head reqs_free_list;
-	int reqs_free_count;
+	/* DebugFS directory entries */
+	struct dentry *root;
+	struct dentry *stats;
 };
 
-/* ====== SBA command helper routines ===== */
+/* ====== Command helper routines ===== */
 
 static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
 {
@@ -196,32 +198,50 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
 	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
 }
 
-/* ====== Channel resource management routines ===== */
+/* ====== General helper routines ===== */
+
+static void sba_peek_mchans(struct sba_device *sba)
+{
+	int mchan_idx;
+
+	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
+		mbox_client_peek_data(sba->mchans[mchan_idx]);
+}
 
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
+	bool found = false;
 	unsigned long flags;
 	struct sba_request *req = NULL;
 
 	spin_lock_irqsave(&sba->reqs_lock, flags);
+	list_for_each_entry(req, &sba->reqs_free_list, node) {
+		if (async_tx_test_ack(&req->tx)) {
+			list_move_tail(&req->node, &sba->reqs_alloc_list);
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 
-	req = list_first_entry_or_null(&sba->reqs_free_list,
-				       struct sba_request, node);
-	if (req) {
-		list_move_tail(&req->node, &sba->reqs_alloc_list);
-		req->state = SBA_REQUEST_STATE_ALLOCED;
-		req->fence = false;
-		req->first = req;
-		INIT_LIST_HEAD(&req->next);
-		req->next_count = 1;
-		atomic_set(&req->next_pending_count, 1);
-
-		sba->reqs_free_count--;
-
-		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+	if (!found) {
+		/*
+		 * We have no more free requests so, we peek
+		 * mailbox channels hoping few active requests
+		 * would have completed which will create more
+		 * room for new requests.
+		 */
+		sba_peek_mchans(sba);
+		return NULL;
 	}
 
-	spin_unlock_irqrestore(&sba->reqs_lock, flags);
+	req->flags = SBA_REQUEST_STATE_ALLOCED;
+	req->first = req;
+	INIT_LIST_HEAD(&req->next);
+	atomic_set(&req->next_pending_count, 1);
+
+	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+	async_tx_ack(&req->tx);
 
 	return req;
 }
@@ -231,7 +251,8 @@ static void _sba_pending_request(struct sba_device *sba,
 				 struct sba_request *req)
 {
 	lockdep_assert_held(&sba->reqs_lock);
-	req->state = SBA_REQUEST_STATE_PENDING;
+	req->flags &= ~SBA_REQUEST_STATE_MASK;
+	req->flags |= SBA_REQUEST_STATE_PENDING;
 	list_move_tail(&req->node, &sba->reqs_pending_list);
 	if (list_empty(&sba->reqs_active_list))
 		sba->reqs_fence = false;
@@ -246,9 +267,10 @@ static bool _sba_active_request(struct sba_device *sba,
 		sba->reqs_fence = false;
 	if (sba->reqs_fence)
 		return false;
-	req->state = SBA_REQUEST_STATE_ACTIVE;
+	req->flags &= ~SBA_REQUEST_STATE_MASK;
+	req->flags |= SBA_REQUEST_STATE_ACTIVE;
 	list_move_tail(&req->node, &sba->reqs_active_list);
-	if (req->fence)
+	if (req->flags & SBA_REQUEST_FENCE)
 		sba->reqs_fence = true;
 	return true;
 }
@@ -258,7 +280,8 @@ static void _sba_abort_request(struct sba_device *sba,
 			       struct sba_request *req)
 {
 	lockdep_assert_held(&sba->reqs_lock);
-	req->state = SBA_REQUEST_STATE_ABORTED;
+	req->flags &= ~SBA_REQUEST_STATE_MASK;
+	req->flags |= SBA_REQUEST_STATE_ABORTED;
 	list_move_tail(&req->node, &sba->reqs_aborted_list);
 	if (list_empty(&sba->reqs_active_list))
 		sba->reqs_fence = false;
@@ -269,42 +292,11 @@ static void _sba_free_request(struct sba_device *sba,
 			      struct sba_request *req)
 {
 	lockdep_assert_held(&sba->reqs_lock);
-	req->state = SBA_REQUEST_STATE_FREE;
+	req->flags &= ~SBA_REQUEST_STATE_MASK;
+	req->flags |= SBA_REQUEST_STATE_FREE;
 	list_move_tail(&req->node, &sba->reqs_free_list);
 	if (list_empty(&sba->reqs_active_list))
 		sba->reqs_fence = false;
-	sba->reqs_free_count++;
-}
-
-static void sba_received_request(struct sba_request *req)
-{
-	unsigned long flags;
-	struct sba_device *sba = req->sba;
-
-	spin_lock_irqsave(&sba->reqs_lock, flags);
-	req->state = SBA_REQUEST_STATE_RECEIVED;
-	list_move_tail(&req->node, &sba->reqs_received_list);
-	spin_unlock_irqrestore(&sba->reqs_lock, flags);
-}
-
-static void sba_complete_chained_requests(struct sba_request *req)
-{
-	unsigned long flags;
-	struct sba_request *nreq;
-	struct sba_device *sba = req->sba;
-
-	spin_lock_irqsave(&sba->reqs_lock, flags);
-
-	req->state = SBA_REQUEST_STATE_COMPLETED;
-	list_move_tail(&req->node, &sba->reqs_completed_list);
-	list_for_each_entry(nreq, &req->next, next) {
-		nreq->state = SBA_REQUEST_STATE_COMPLETED;
-		list_move_tail(&nreq->node, &sba->reqs_completed_list);
-	}
-	if (list_empty(&sba->reqs_active_list))
-		sba->reqs_fence = false;
-
-	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 }
 
 static void sba_free_chained_requests(struct sba_request *req)
@@ -332,8 +324,7 @@ static void sba_chain_request(struct sba_request *first,
 
 	list_add_tail(&req->next, &first->next);
 	req->first = first;
-	first->next_count++;
-	atomic_set(&first->next_pending_count, first->next_count);
+	atomic_inc(&first->next_pending_count);
 
 	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 }
@@ -349,14 +340,6 @@ static void sba_cleanup_nonpending_requests(struct sba_device *sba)
 	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
 		_sba_free_request(sba, req);
 
-	/* Freeup all received request */
-	list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
-		_sba_free_request(sba, req);
-
-	/* Freeup all completed request */
-	list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
-		_sba_free_request(sba, req);
-
 	/* Set all active requests as aborted */
 	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
 		_sba_abort_request(sba, req);
@@ -383,26 +366,6 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
 	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 }
 
-/* ====== DMAENGINE callbacks ===== */
-
-static void sba_free_chan_resources(struct dma_chan *dchan)
-{
-	/*
-	 * Channel resources are pre-alloced so we just free-up
-	 * whatever we can so that we can re-use pre-alloced
-	 * channel resources next time.
-	 */
-	sba_cleanup_nonpending_requests(to_sba_device(dchan));
-}
-
-static int sba_device_terminate_all(struct dma_chan *dchan)
-{
-	/* Cleanup all pending requests */
-	sba_cleanup_pending_requests(to_sba_device(dchan));
-
-	return 0;
-}
-
 static int sba_send_mbox_request(struct sba_device *sba,
 				 struct sba_request *req)
 {
@@ -419,42 +382,156 @@ static int sba_send_mbox_request(struct sba_device *sba,
 		dev_err(sba->dev, "send message failed with error %d", ret);
 		return ret;
 	}
+
+	/* Check error returned by mailbox controller */
 	ret = req->msg.error;
 	if (ret < 0) {
 		dev_err(sba->dev, "message error %d", ret);
-		return ret;
 	}
 
-	return 0;
+	/* Signal txdone for mailbox channel */
+	mbox_client_txdone(sba->mchans[mchans_idx], ret);
+
+	return ret;
 }
 
-static void sba_issue_pending(struct dma_chan *dchan)
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_process_pending_requests(struct sba_device *sba)
 {
 	int ret;
-	unsigned long flags;
-	struct sba_request *req, *req1;
-	struct sba_device *sba = to_sba_device(dchan);
+	u32 count;
+	struct sba_request *req;
 
-	spin_lock_irqsave(&sba->reqs_lock, flags);
+	/*
+	 * Process few pending requests
+	 *
+	 * For now, we process (<number_of_mailbox_channels> * 8)
+	 * number of requests at a time.
+	 */
+	count = sba->mchans_count * 8;
+	while (!list_empty(&sba->reqs_pending_list) && count) {
+		/* Get the first pending request */
+		req = list_first_entry(&sba->reqs_pending_list,
+				       struct sba_request, node);
 
-	/* Process all pending request */
-	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
 		/* Try to make request active */
 		if (!_sba_active_request(sba, req))
 			break;
 
 		/* Send request to mailbox channel */
-		spin_unlock_irqrestore(&sba->reqs_lock, flags);
 		ret = sba_send_mbox_request(sba, req);
-		spin_lock_irqsave(&sba->reqs_lock, flags);
-
-		/* If something went wrong then keep request pending */
 		if (ret < 0) {
 			_sba_pending_request(sba, req);
 			break;
 		}
+
+		count--;
+	}
+}
+
+static void sba_process_received_request(struct sba_device *sba,
+					 struct sba_request *req)
+{
+	unsigned long flags;
+	struct dma_async_tx_descriptor *tx;
+	struct sba_request *nreq, *first = req->first;
+
+	/* Process only after all chained requests are received */
+	if (!atomic_dec_return(&first->next_pending_count)) {
+		tx = &first->tx;
+
+		WARN_ON(tx->cookie < 0);
+		if (tx->cookie > 0) {
+			dma_cookie_complete(tx);
+			dmaengine_desc_get_callback_invoke(tx, NULL);
+			dma_descriptor_unmap(tx);
+			tx->callback = NULL;
+			tx->callback_result = NULL;
+		}
+
+		dma_run_dependencies(tx);
+
+		spin_lock_irqsave(&sba->reqs_lock, flags);
+
+		/* Free all requests chained to first request */
+		list_for_each_entry(nreq, &first->next, next)
+			_sba_free_request(sba, nreq);
+		INIT_LIST_HEAD(&first->next);
+
+		/* Free the first request */
+		_sba_free_request(sba, first);
+
+		/* Process pending requests */
+		_sba_process_pending_requests(sba);
+
+		spin_unlock_irqrestore(&sba->reqs_lock, flags);
 	}
+}
+
+static void sba_write_stats_in_seqfile(struct sba_device *sba,
+				       struct seq_file *file)
+{
+	unsigned long flags;
+	struct sba_request *req;
+	u32 free_count = 0, alloced_count = 0;
+	u32 pending_count = 0, active_count = 0, aborted_count = 0;
+
+	spin_lock_irqsave(&sba->reqs_lock, flags);
+
+	list_for_each_entry(req, &sba->reqs_free_list, node)
+		if (async_tx_test_ack(&req->tx))
+			free_count++;
+
+	list_for_each_entry(req, &sba->reqs_alloc_list, node)
+		alloced_count++;
+
+	list_for_each_entry(req, &sba->reqs_pending_list, node)
+		pending_count++;
+
+	list_for_each_entry(req, &sba->reqs_active_list, node)
+		active_count++;
 
+	list_for_each_entry(req, &sba->reqs_aborted_list, node)
+		aborted_count++;
+
+	spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+	seq_printf(file, "maximum requests = %d\n", sba->max_req);
+	seq_printf(file, "free requests = %d\n", free_count);
+	seq_printf(file, "alloced requests = %d\n", alloced_count);
+	seq_printf(file, "pending requests = %d\n", pending_count);
+	seq_printf(file, "active requests = %d\n", active_count);
+	seq_printf(file, "aborted requests = %d\n", aborted_count);
+}
+
+/* ====== DMAENGINE callbacks ===== */
+
+static void sba_free_chan_resources(struct dma_chan *dchan)
+{
+	/*
+	 * Channel resources are pre-alloced so we just free-up
+	 * whatever we can so that we can re-use pre-alloced
+	 * channel resources next time.
+	 */
+	sba_cleanup_nonpending_requests(to_sba_device(dchan));
+}
+
+static int sba_device_terminate_all(struct dma_chan *dchan)
+{
+	/* Cleanup all pending requests */
+	sba_cleanup_pending_requests(to_sba_device(dchan));
+
+	return 0;
+}
+
+static void sba_issue_pending(struct dma_chan *dchan)
+{
+	unsigned long flags;
+	struct sba_device *sba = to_sba_device(dchan);
+
+	/* Process pending requests */
+	spin_lock_irqsave(&sba->reqs_lock, flags);
+	_sba_process_pending_requests(sba);
 	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 }
 
@@ -486,17 +563,15 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
 				     dma_cookie_t cookie,
 				     struct dma_tx_state *txstate)
 {
-	int mchan_idx;
 	enum dma_status ret;
 	struct sba_device *sba = to_sba_device(dchan);
 
-	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
-		mbox_client_peek_data(sba->mchans[mchan_idx]);
-
 	ret = dma_cookie_status(dchan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
 		return ret;
 
+	sba_peek_mchans(sba);
+
 	return dma_cookie_status(dchan, cookie, txstate);
 }
 
@@ -506,6 +581,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
 {
 	u64 cmd;
 	u32 c_mdata;
+	dma_addr_t resp_dma = req->tx.phys;
 	struct brcm_sba_command *cmdsp = cmds;
 
 	/* Type-B command to load dummy data into buf0 */
@@ -521,7 +597,7 @@
 	cmdsp->cmd = cmd;
 	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
-	cmdsp->data = req->resp_dma;
+	cmdsp->data = resp_dma;
 	cmdsp->data_len = req->sba->hw_resp_size;
 	cmdsp++;
 
@@ -542,11 +618,11 @@
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
-	cmdsp->data = req->resp_dma;
+	cmdsp->data = resp_dma;
 	cmdsp->data_len = req->sba->hw_resp_size;
 	cmdsp++;
 
@@ -573,7 +649,7 @@ sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
 	 * Force fence so that no requests are submitted
 	 * until DMA callback for this request is invoked.
 	 */
-	req->fence = true;
+	req->flags |= SBA_REQUEST_FENCE;
 
 	/* Fillup request message */
 	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
@@ -593,6 +669,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req,
 {
 	u64 cmd;
 	u32 c_mdata;
+	dma_addr_t resp_dma = req->tx.phys;
 	struct brcm_sba_command *cmdsp = cmds;
 
 	/* Type-B command to load data into buf0 */
@@ -629,7 +706,7 @@
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -656,7 +733,8 @@ sba_prep_dma_memcpy_req(struct sba_device *sba,
 	req = sba_alloc_request(sba);
 	if (!req)
 		return NULL;
-	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+	if (flags & DMA_PREP_FENCE)
+		req->flags |= SBA_REQUEST_FENCE;
 
 	/* Fillup request message */
 	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
@@ -711,6 +789,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
 	u64 cmd;
 	u32 c_mdata;
 	unsigned int i;
+	dma_addr_t resp_dma = req->tx.phys;
 	struct brcm_sba_command *cmdsp = cmds;
 
 	/* Type-B command to load data into buf0 */
@@ -766,7 +845,7 @@
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -782,7 +861,7 @@
 	msg->error = 0;
 }
 
-struct sba_request *
+static struct sba_request *
 sba_prep_dma_xor_req(struct sba_device *sba,
 		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
 		     u32 src_cnt, size_t len, unsigned long flags)
@@ -793,7 +872,8 @@ sba_prep_dma_xor_req(struct sba_device *sba,
 	req = sba_alloc_request(sba);
 	if (!req)
 		return NULL;
-	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+	if (flags & DMA_PREP_FENCE)
+		req->flags |= SBA_REQUEST_FENCE;
 
 	/* Fillup request message */
 	sba_fillup_xor_msg(req, req->cmds, &req->msg,
@@ -854,6 +934,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
 	u64 cmd;
 	u32 c_mdata;
 	unsigned int i;
+	dma_addr_t resp_dma = req->tx.phys;
 	struct brcm_sba_command *cmdsp = cmds;
 
 	if (pq_continue) {
@@ -947,7 +1028,7 @@
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -974,7 +1055,7 @@
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -991,7 +1072,7 @@
 	msg->error = 0;
 }
 
-struct sba_request *
+static struct sba_request *
 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
 		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
 		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
@@ -1002,7 +1083,8 @@ sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
 	req = sba_alloc_request(sba);
 	if (!req)
 		return NULL;
-	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+	if (flags & DMA_PREP_FENCE)
+		req->flags |= SBA_REQUEST_FENCE;
 
 	/* Fillup request messages */
 	sba_fillup_pq_msg(req, dmaf_continue(flags),
@@ -1027,6 +1109,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req,
 	u64 cmd;
 	u32 c_mdata;
 	u8 pos, dpos = raid6_gflog[scf];
+	dma_addr_t resp_dma = req->tx.phys;
 	struct brcm_sba_command *cmdsp = cmds;
 
 	if (!dst_p)
@@ -1105,7 +1188,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req,
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -1226,7 +1309,7 @@ skip_q_computation:
 	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 	if (req->sba->hw_resp_size) {
 		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
-		cmdsp->resp = req->resp_dma;
+		cmdsp->resp = resp_dma;
 		cmdsp->resp_len = req->sba->hw_resp_size;
 	}
 	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -1243,7 +1326,7 @@ skip_q:
 	msg->error = 0;
 }
 
-struct sba_request *
+static struct sba_request *
 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
 			   dma_addr_t *dst_p, dma_addr_t *dst_q,
 			   dma_addr_t src, u8 scf, size_t len,
@@ -1255,7 +1338,8 @@ sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
 	req = sba_alloc_request(sba);
 	if (!req)
 		return NULL;
-	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+	if (flags & DMA_PREP_FENCE)
+		req->flags |= SBA_REQUEST_FENCE;
 
 	/* Fillup request messages */
 	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
@@ -1370,40 +1454,10 @@ fail:
 
 /* ====== Mailbox callbacks ===== */
 
-static void sba_dma_tx_actions(struct sba_request *req)
-{
-	struct dma_async_tx_descriptor *tx = &req->tx;
-
-	WARN_ON(tx->cookie < 0);
-
-	if (tx->cookie > 0) {
-		dma_cookie_complete(tx);
-
-		/*
-		 * Call the callback (must not sleep or submit new
-		 * operations to this channel)
-		 */
-		if (tx->callback)
-			tx->callback(tx->callback_param);
-
-		dma_descriptor_unmap(tx);
-	}
-
-	/* Run dependent operations */
-	dma_run_dependencies(tx);
-
-	/* If waiting for 'ack' then move to completed list */
-	if (!async_tx_test_ack(&req->tx))
-		sba_complete_chained_requests(req);
-	else
-		sba_free_chained_requests(req);
-}
-
 static void sba_receive_message(struct mbox_client *cl, void *msg)
 {
-	unsigned long flags;
 	struct brcm_message *m = msg;
-	struct sba_request *req = m->ctx, *req1;
+	struct sba_request *req = m->ctx;
 	struct sba_device *sba = req->sba;
 
 	/* Error count if message has error */
@@ -1411,52 +1465,37 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
 		dev_err(sba->dev, "%s got message with error %d",
 			dma_chan_name(&sba->dma_chan), m->error);
 
-	/* Mark request as received */
-	sba_received_request(req);
-
-	/* Wait for all chained requests to be completed */
-	if (atomic_dec_return(&req->first->next_pending_count))
-		goto done;
-
-	/* Point to first request */
-	req = req->first;
-
-	/* Update request */
-	if (req->state == SBA_REQUEST_STATE_RECEIVED)
-		sba_dma_tx_actions(req);
-	else
-		sba_free_chained_requests(req);
+	/* Process received request */
+	sba_process_received_request(sba, req);
+}
 
-	spin_lock_irqsave(&sba->reqs_lock, flags);
+/* ====== Debugfs callbacks ====== */
 
-	/* Re-check all completed request waiting for 'ack' */
-	list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
-		spin_unlock_irqrestore(&sba->reqs_lock, flags);
-		sba_dma_tx_actions(req);
-		spin_lock_irqsave(&sba->reqs_lock, flags);
-	}
+static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
+{
+	struct platform_device *pdev = to_platform_device(file->private);
+	struct sba_device *sba = platform_get_drvdata(pdev);
 
-	spin_unlock_irqrestore(&sba->reqs_lock, flags);
+	/* Write stats in file */
+	sba_write_stats_in_seqfile(sba, file);
 
-done:
-	/* Try to submit pending request */
-	sba_issue_pending(&sba->dma_chan);
+	return 0;
 }
 
 /* ====== Platform driver routines ===== */
 
 static int sba_prealloc_channel_resources(struct sba_device *sba)
 {
-	int i, j, p, ret = 0;
+	int i, j, ret = 0;
 	struct sba_request *req = NULL;
 
-	sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
+	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
 					    sba->max_resp_pool_size,
 					    &sba->resp_dma_base, GFP_KERNEL);
 	if (!sba->resp_base)
 		return -ENOMEM;
 
-	sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
+	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
 					    sba->max_cmds_pool_size,
 					    &sba->cmds_dma_base, GFP_KERNEL);
 	if (!sba->cmds_base) {
@@ -1469,36 +1508,23 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
 	INIT_LIST_HEAD(&sba->reqs_alloc_list);
 	INIT_LIST_HEAD(&sba->reqs_pending_list);
 	INIT_LIST_HEAD(&sba->reqs_active_list);
-	INIT_LIST_HEAD(&sba->reqs_received_list);
-	INIT_LIST_HEAD(&sba->reqs_completed_list);
 	INIT_LIST_HEAD(&sba->reqs_aborted_list);
 	INIT_LIST_HEAD(&sba->reqs_free_list);
 
-	sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
-				 sizeof(*req), GFP_KERNEL);
-	if (!sba->reqs) {
-		ret = -ENOMEM;
-		goto fail_free_cmds_pool;
-	}
-
-	for (i = 0, p = 0; i < sba->max_req; i++) {
-		req = &sba->reqs[i];
+	for (i = 0; i < sba->max_req; i++) {
+		req = devm_kzalloc(sba->dev,
+				   sizeof(*req) +
+				   sba->max_cmd_per_req * sizeof(req->cmds[0]),
+				   GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto fail_free_cmds_pool;
+		}
 		INIT_LIST_HEAD(&req->node);
 		req->sba = sba;
-		req->state = SBA_REQUEST_STATE_FREE;
+		req->flags = SBA_REQUEST_STATE_FREE;
 		INIT_LIST_HEAD(&req->next);
-		req->next_count = 1;
 		atomic_set(&req->next_pending_count, 0);
-		req->fence = false;
-		req->resp = sba->resp_base + p;
-		req->resp_dma = sba->resp_dma_base + p;
-		p += sba->hw_resp_size;
-		req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
-					 sizeof(*req->cmds), GFP_KERNEL);
-		if (!req->cmds) {
-			ret = -ENOMEM;
-			goto fail_free_cmds_pool;
-		}
 		for (j = 0; j < sba->max_cmd_per_req; j++) {
 			req->cmds[j].cmd = 0;
 			req->cmds[j].cmd_dma = sba->cmds_base +
@@ -1509,21 +1535,20 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
 		}
 		memset(&req->msg, 0, sizeof(req->msg));
 		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+		async_tx_ack(&req->tx);
 		req->tx.tx_submit = sba_tx_submit;
-		req->tx.phys = req->resp_dma;
+		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
 		list_add_tail(&req->node, &sba->reqs_free_list);
 	}
 
-	sba->reqs_free_count = sba->max_req;
-
 	return 0;
 
 fail_free_cmds_pool:
-	dma_free_coherent(sba->dma_dev.dev,
+	dma_free_coherent(sba->mbox_dev,
 			  sba->max_cmds_pool_size,
 			  sba->cmds_base, sba->cmds_dma_base);
 fail_free_resp_pool:
-	dma_free_coherent(sba->dma_dev.dev,
+	dma_free_coherent(sba->mbox_dev,
 			  sba->max_resp_pool_size,
 			  sba->resp_base, sba->resp_dma_base);
 	return ret;
@@ -1532,9 +1557,9 @@ fail_free_resp_pool:
 static void sba_freeup_channel_resources(struct sba_device *sba)
 {
 	dmaengine_terminate_all(&sba->dma_chan);
-	dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
+	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
 			  sba->cmds_base, sba->cmds_dma_base);
-	dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
+	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
 			  sba->resp_base, sba->resp_dma_base);
 	sba->resp_base = NULL;
 	sba->resp_dma_base = 0;
@@ -1625,6 +1650,13 @@ static int sba_probe(struct platform_device *pdev)
 	sba->dev = &pdev->dev;
 	platform_set_drvdata(pdev, sba);
 
+	/* Number of channels equals number of mailbox channels */
+	ret = of_count_phandle_with_args(pdev->dev.of_node,
+					 "mboxes", "#mbox-cells");
+	if (ret <= 0)
+		return -ENODEV;
+	mchans_count = ret;
+
 	/* Determine SBA version from DT compatible string */
 	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
 		sba->ver = SBA_VER_1;
@@ -1637,14 +1669,12 @@ static int sba_probe(struct platform_device *pdev)
 	/* Derived Configuration parameters */
 	switch (sba->ver) {
 	case SBA_VER_1:
-		sba->max_req = 1024;
 		sba->hw_buf_size = 4096;
 		sba->hw_resp_size = 8;
 		sba->max_pq_coefs = 6;
 		sba->max_pq_srcs = 6;
 		break;
 	case SBA_VER_2:
-		sba->max_req = 1024;
 		sba->hw_buf_size = 4096;
 		sba->hw_resp_size = 8;
 		sba->max_pq_coefs = 30;
@@ -1658,6 +1688,7 @@ static int sba_probe(struct platform_device *pdev)
 	default:
 		return -EINVAL;
 	}
+	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
 	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
 	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
 	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1668,25 +1699,17 @@ static int sba_probe(struct platform_device *pdev)
 	sba->client.dev = &pdev->dev;
 	sba->client.rx_callback = sba_receive_message;
 	sba->client.tx_block = false;
-	sba->client.knows_txdone = false;
+	sba->client.knows_txdone = true;
 	sba->client.tx_tout = 0;
 
-	/* Number of channels equals number of mailbox channels */
-	ret = of_count_phandle_with_args(pdev->dev.of_node,
-					 "mboxes", "#mbox-cells");
-	if (ret <= 0)
-		return -ENODEV;
-	mchans_count = ret;
-	sba->mchans_count = 0;
-	atomic_set(&sba->mchans_current, 0);
-
 	/* Allocate mailbox channel array */
-	sba->mchans = devm_kcalloc(&pdev->dev, sba->mchans_count,
+	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
 				   sizeof(*sba->mchans), GFP_KERNEL);
 	if (!sba->mchans)
 		return -ENOMEM;
 
 	/* Request mailbox channels */
+	sba->mchans_count = 0;
 	for (i = 0; i < mchans_count; i++) {
 		sba->mchans[i] = mbox_request_channel(&sba->client, i);
 		if (IS_ERR(sba->mchans[i])) {
@@ -1695,6 +1718,7 @@ static int sba_probe(struct platform_device *pdev)
 		}
 		sba->mchans_count++;
 	}
+	atomic_set(&sba->mchans_current, 0);
 
 	/* Find-out underlying mailbox device */
 	ret = of_parse_phandle_with_args(pdev->dev.of_node,
@@ -1723,15 +1747,34 @@ static int sba_probe(struct platform_device *pdev)
 		}
 	}
 
-	/* Register DMA device with linux async framework */
-	ret = sba_async_register(sba);
+	/* Prealloc channel resource */
+	ret = sba_prealloc_channel_resources(sba);
 	if (ret)
 		goto fail_free_mchans;
 
-	/* Prealloc channel resource */
-	ret = sba_prealloc_channel_resources(sba);
+	/* Check availability of debugfs */
+	if (!debugfs_initialized())
+		goto skip_debugfs;
+
+	/* Create debugfs root entry */
+	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
+	if (IS_ERR_OR_NULL(sba->root)) {
+		dev_err(sba->dev, "failed to create debugfs root entry\n");
+		sba->root = NULL;
+		goto skip_debugfs;
+	}
+
+	/* Create debugfs stats entry */
+	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
+						 sba_debugfs_stats_show);
+	if (IS_ERR_OR_NULL(sba->stats))
+		dev_err(sba->dev, "failed to create debugfs stats file\n");
+skip_debugfs:
+
+	/* Register DMA device with Linux async framework */
+	ret = sba_async_register(sba);
 	if (ret)
-		goto fail_async_dev_unreg;
+		goto fail_free_resources;
 
 	/* Print device info */
 	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
@@ -1740,8 +1783,9 @@ static int sba_probe(struct platform_device *pdev)
 
 	return 0;
 
-fail_async_dev_unreg:
-	dma_async_device_unregister(&sba->dma_dev);
+fail_free_resources:
+	debugfs_remove_recursive(sba->root);
+	sba_freeup_channel_resources(sba);
 fail_free_mchans:
 	for (i = 0; i < sba->mchans_count; i++)
 		mbox_free_channel(sba->mchans[i]);
@@ -1753,10 +1797,12 @@ static int sba_remove(struct platform_device *pdev)
 	int i;
 	struct sba_device *sba = platform_get_drvdata(pdev);
 
-	sba_freeup_channel_resources(sba);
-
 	dma_async_device_unregister(&sba->dma_dev);
 
+	debugfs_remove_recursive(sba->root);
+
+	sba_freeup_channel_resources(sba);
+
 	for (i = 0; i < sba->mchans_count; i++)
 		mbox_free_channel(sba->mchans[i]);
 
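For context on the debugfs hunks above: debugfs_create_devm_seqfile() is an existing kernel helper that binds a show routine to a device-managed seq_file, and the originating struct device is reachable through file->private (which is why the driver's show routine calls to_platform_device(file->private)). A hedged sketch of the registration pattern as probe() uses it; names prefixed demo_ are hypothetical and error handling is trimmed:

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/seq_file.h>

static int demo_stats_show(struct seq_file *file, void *offset)
{
	/* file->private is the struct device passed at creation time */
	struct device *dev = file->private;

	seq_printf(file, "device = %s\n", dev_name(dev));
	return 0;
}

static void demo_setup_debugfs(struct device *dev, struct dentry **root)
{
	if (!debugfs_initialized())
		return;

	/* Per-device directory, then a devm-managed "stats" seq_file */
	*root = debugfs_create_dir(dev_name(dev), NULL);
	debugfs_create_devm_seqfile(dev, "stats", *root, demo_stats_show);
}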