aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi/spi-sirf.c
diff options
context:
space:
mode:
authorBarry Song <21cnbao@gmail.com>2013-08-06 02:21:21 -0400
committerMark Brown <broonie@linaro.org>2013-08-06 06:10:01 -0400
commitde39f5fa09d006561958431779c5a5e5b5b4e0ea (patch)
treefb86282f68c4de7b3d7596039e366a9ba5a630e1 /drivers/spi/spi-sirf.c
parent94b1f0dfa6dd8a3ed303cc7b0034b17e9cc34824 (diff)
spi: sirf: use DMA if both buffer address and length are aligned
This patch enables DMA support for the SiRFSoC SPI driver. If both the buffers and the transfer length are aligned with the DMA controller's hardware limitation, the generic SiRF dmaengine driver is used. For PIO, the SiRF SPI controller actually uses tx to trigger rx — that means if we write any word to the tx FIFO, we will get a word from the rx FIFO. For DMA, we use two different channels for tx and rx, and issue them both for every transfer. Signed-off-by: Barry Song <Baohua.Song@csr.com> Signed-off-by: Mark Brown <broonie@linaro.org>
Diffstat (limited to 'drivers/spi/spi-sirf.c')
-rw-r--r--drivers/spi/spi-sirf.c180
1 file changed, 157 insertions(+), 23 deletions(-)
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 96087169296e..62c92c334260 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -19,6 +19,10 @@
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <linux/spi/spi_bitbang.h> 21#include <linux/spi/spi_bitbang.h>
22#include <linux/dmaengine.h>
23#include <linux/dma-direction.h>
24#include <linux/dma-mapping.h>
25#include <linux/sirfsoc_dma.h>
22 26
23#define DRIVER_NAME "sirfsoc_spi" 27#define DRIVER_NAME "sirfsoc_spi"
24 28
@@ -119,9 +123,20 @@
119#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20) 123#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
120#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2) 124#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
121 125
126/*
127 * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
128 * due to the limitation of dma controller
129 */
130
131#define ALIGNED(x) (!((u32)x & 0x3))
132#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
133 ALIGNED(x->len * sspi->word_width) && (x->len * sspi->word_width < \
134 2 * PAGE_SIZE))
135
122struct sirfsoc_spi { 136struct sirfsoc_spi {
123 struct spi_bitbang bitbang; 137 struct spi_bitbang bitbang;
124 struct completion done; 138 struct completion rx_done;
139 struct completion tx_done;
125 140
126 void __iomem *base; 141 void __iomem *base;
127 u32 ctrl_freq; /* SPI controller clock speed */ 142 u32 ctrl_freq; /* SPI controller clock speed */
@@ -140,6 +155,14 @@ struct sirfsoc_spi {
140 unsigned int left_tx_cnt; 155 unsigned int left_tx_cnt;
141 unsigned int left_rx_cnt; 156 unsigned int left_rx_cnt;
142 157
158 /* rx & tx DMA channels */
159 struct dma_chan *rx_chan;
160 struct dma_chan *tx_chan;
161 dma_addr_t src_start;
162 dma_addr_t dst_start;
163 void *dummypage;
164 int word_width; /* in bytes */
165
143 int chipselect[0]; 166 int chipselect[0];
144}; 167};
145 168
@@ -241,7 +264,7 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
241 /* Error Conditions */ 264 /* Error Conditions */
242 if (spi_stat & SIRFSOC_SPI_RX_OFLOW || 265 if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
243 spi_stat & SIRFSOC_SPI_TX_UFLOW) { 266 spi_stat & SIRFSOC_SPI_TX_UFLOW) {
244 complete(&sspi->done); 267 complete(&sspi->rx_done);
245 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 268 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
246 } 269 }
247 270
@@ -261,22 +284,30 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
261 284
262 /* Received all words */ 285 /* Received all words */
263 if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) { 286 if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) {
264 complete(&sspi->done); 287 complete(&sspi->rx_done);
265 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 288 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
266 } 289 }
267 return IRQ_HANDLED; 290 return IRQ_HANDLED;
268} 291}
269 292
293static void spi_sirfsoc_dma_fini_callback(void *data)
294{
295 struct completion *dma_complete = data;
296
297 complete(dma_complete);
298}
299
270static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) 300static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
271{ 301{
272 struct sirfsoc_spi *sspi; 302 struct sirfsoc_spi *sspi;
273 int timeout = t->len * 10; 303 int timeout = t->len * 10;
274 sspi = spi_master_get_devdata(spi->master); 304 sspi = spi_master_get_devdata(spi->master);
275 305
276 sspi->tx = t->tx_buf; 306 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
277 sspi->rx = t->rx_buf; 307 sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
278 sspi->left_tx_cnt = sspi->left_rx_cnt = t->len; 308 sspi->left_tx_cnt = sspi->left_rx_cnt = t->len;
279 INIT_COMPLETION(sspi->done); 309 INIT_COMPLETION(sspi->rx_done);
310 INIT_COMPLETION(sspi->tx_done);
280 311
281 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); 312 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
282 313
@@ -305,17 +336,65 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
305 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 336 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
306 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 337 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
307 338
308 /* Send the first word to trigger the whole tx/rx process */ 339 if (IS_DMA_VALID(t)) {
309 sspi->tx_word(sspi); 340 struct dma_async_tx_descriptor *rx_desc, *tx_desc;
341 unsigned int size = t->len * sspi->word_width;
342
343 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE);
344 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
345 sspi->dst_start, size, DMA_DEV_TO_MEM,
346 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
347 rx_desc->callback = spi_sirfsoc_dma_fini_callback;
348 rx_desc->callback_param = &sspi->rx_done;
349
350 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE);
351 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
352 sspi->src_start, size, DMA_MEM_TO_DEV,
353 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
354 tx_desc->callback = spi_sirfsoc_dma_fini_callback;
355 tx_desc->callback_param = &sspi->tx_done;
356
357 dmaengine_submit(tx_desc);
358 dmaengine_submit(rx_desc);
359 dma_async_issue_pending(sspi->tx_chan);
360 dma_async_issue_pending(sspi->rx_chan);
361 } else {
362 /* Send the first word to trigger the whole tx/rx process */
363 sspi->tx_word(sspi);
364
365 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
366 SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
367 SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
368 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
369 }
310 370
311 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
312 SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
313 SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
314 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
315 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); 371 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
316 372
317 if (wait_for_completion_timeout(&sspi->done, timeout) == 0) 373 if (!IS_DMA_VALID(t)) { /* for PIO */
374 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
375 dev_err(&spi->dev, "transfer timeout\n");
376 } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
318 dev_err(&spi->dev, "transfer timeout\n"); 377 dev_err(&spi->dev, "transfer timeout\n");
378 dmaengine_terminate_all(sspi->rx_chan);
379 } else
380 sspi->left_rx_cnt = 0;
381
382 /*
383 * we only wait tx-done event if transferring by DMA. for PIO,
384 * we get rx data by writing tx data, so if rx is done, tx has
385 * done earlier
386 */
387 if (IS_DMA_VALID(t)) {
388 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
389 dev_err(&spi->dev, "transfer timeout\n");
390 dmaengine_terminate_all(sspi->tx_chan);
391 }
392 }
393
394 if (IS_DMA_VALID(t)) {
395 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
396 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
397 }
319 398
320 /* TX, RX FIFO stop */ 399 /* TX, RX FIFO stop */
321 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 400 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
@@ -332,7 +411,6 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
332 411
333 if (sspi->chipselect[spi->chip_select] == 0) { 412 if (sspi->chipselect[spi->chip_select] == 0) {
334 u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL); 413 u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
335 regval |= SIRFSOC_SPI_CS_IO_OUT;
336 switch (value) { 414 switch (value) {
337 case BITBANG_CS_ACTIVE: 415 case BITBANG_CS_ACTIVE:
338 if (spi->mode & SPI_CS_HIGH) 416 if (spi->mode & SPI_CS_HIGH)
@@ -369,11 +447,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
369 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 447 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
370 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; 448 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
371 449
372 /* Enable IO mode for RX, TX */
373 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
374 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
375 regval = (sspi->ctrl_freq / (2 * hz)) - 1; 450 regval = (sspi->ctrl_freq / (2 * hz)) - 1;
376
377 if (regval > 0xFFFF || regval < 0) { 451 if (regval > 0xFFFF || regval < 0) {
378 dev_err(&spi->dev, "Speed %d not supported\n", hz); 452 dev_err(&spi->dev, "Speed %d not supported\n", hz);
379 return -EINVAL; 453 return -EINVAL;
@@ -388,6 +462,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
388 SIRFSOC_SPI_FIFO_WIDTH_BYTE; 462 SIRFSOC_SPI_FIFO_WIDTH_BYTE;
389 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 463 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
390 SIRFSOC_SPI_FIFO_WIDTH_BYTE; 464 SIRFSOC_SPI_FIFO_WIDTH_BYTE;
465 sspi->word_width = 1;
391 break; 466 break;
392 case 12: 467 case 12:
393 case 16: 468 case 16:
@@ -399,6 +474,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
399 SIRFSOC_SPI_FIFO_WIDTH_WORD; 474 SIRFSOC_SPI_FIFO_WIDTH_WORD;
400 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 475 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
401 SIRFSOC_SPI_FIFO_WIDTH_WORD; 476 SIRFSOC_SPI_FIFO_WIDTH_WORD;
477 sspi->word_width = 2;
402 break; 478 break;
403 case 32: 479 case 32:
404 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32; 480 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
@@ -408,6 +484,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
408 SIRFSOC_SPI_FIFO_WIDTH_DWORD; 484 SIRFSOC_SPI_FIFO_WIDTH_DWORD;
409 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 485 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
410 SIRFSOC_SPI_FIFO_WIDTH_DWORD; 486 SIRFSOC_SPI_FIFO_WIDTH_DWORD;
487 sspi->word_width = 4;
411 break; 488 break;
412 default: 489 default:
413 BUG(); 490 BUG();
@@ -442,6 +519,17 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
442 writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL); 519 writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
443 520
444 writel(regval, sspi->base + SIRFSOC_SPI_CTRL); 521 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
522
523 if (IS_DMA_VALID(t)) {
524 /* Enable DMA mode for RX, TX */
525 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
526 writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
527 } else {
528 /* Enable IO mode for RX, TX */
529 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
530 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
531 }
532
445 return 0; 533 return 0;
446} 534}
447 535
@@ -466,6 +554,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
466 struct spi_master *master; 554 struct spi_master *master;
467 struct resource *mem_res; 555 struct resource *mem_res;
468 int num_cs, cs_gpio, irq; 556 int num_cs, cs_gpio, irq;
557 u32 rx_dma_ch, tx_dma_ch;
558 dma_cap_mask_t dma_cap_mask;
469 int i; 559 int i;
470 int ret; 560 int ret;
471 561
@@ -476,6 +566,20 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
476 goto err_cs; 566 goto err_cs;
477 } 567 }
478 568
569 ret = of_property_read_u32(pdev->dev.of_node,
570 "sirf,spi-dma-rx-channel", &rx_dma_ch);
571 if (ret < 0) {
572 dev_err(&pdev->dev, "Unable to get rx dma channel\n");
573 goto err_cs;
574 }
575
576 ret = of_property_read_u32(pdev->dev.of_node,
577 "sirf,spi-dma-tx-channel", &tx_dma_ch);
578 if (ret < 0) {
579 dev_err(&pdev->dev, "Unable to get tx dma channel\n");
580 goto err_cs;
581 }
582
479 master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); 583 master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
480 if (!master) { 584 if (!master) {
481 dev_err(&pdev->dev, "Unable to allocate SPI master\n"); 585 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -543,15 +647,33 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
543 SPI_BPW_MASK(16) | SPI_BPW_MASK(32); 647 SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
544 sspi->bitbang.master->dev.of_node = pdev->dev.of_node; 648 sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
545 649
650 /* request DMA channels */
651 dma_cap_zero(dma_cap_mask);
652 dma_cap_set(DMA_INTERLEAVE, dma_cap_mask);
653
654 sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
655 (void *)rx_dma_ch);
656 if (!sspi->rx_chan) {
657 dev_err(&pdev->dev, "can not allocate rx dma channel\n");
658 goto free_master;
659 }
660 sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
661 (void *)tx_dma_ch);
662 if (!sspi->tx_chan) {
663 dev_err(&pdev->dev, "can not allocate tx dma channel\n");
664 goto free_rx_dma;
665 }
666
546 sspi->clk = clk_get(&pdev->dev, NULL); 667 sspi->clk = clk_get(&pdev->dev, NULL);
547 if (IS_ERR(sspi->clk)) { 668 if (IS_ERR(sspi->clk)) {
548 ret = -EINVAL; 669 ret = PTR_ERR(sspi->clk);
549 goto free_master; 670 goto free_tx_dma;
550 } 671 }
551 clk_prepare_enable(sspi->clk); 672 clk_prepare_enable(sspi->clk);
552 sspi->ctrl_freq = clk_get_rate(sspi->clk); 673 sspi->ctrl_freq = clk_get_rate(sspi->clk);
553 674
554 init_completion(&sspi->done); 675 init_completion(&sspi->rx_done);
676 init_completion(&sspi->tx_done);
555 677
556 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 678 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
557 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 679 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
@@ -560,17 +682,26 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
560 /* We are not using dummy delay between command and data */ 682 /* We are not using dummy delay between command and data */
561 writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL); 683 writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
562 684
685 sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
686 if (!sspi->dummypage)
687 goto free_clk;
688
563 ret = spi_bitbang_start(&sspi->bitbang); 689 ret = spi_bitbang_start(&sspi->bitbang);
564 if (ret) 690 if (ret)
565 goto free_clk; 691 goto free_dummypage;
566 692
567 dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num); 693 dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
568 694
569 return 0; 695 return 0;
570 696free_dummypage:
697 kfree(sspi->dummypage);
571free_clk: 698free_clk:
572 clk_disable_unprepare(sspi->clk); 699 clk_disable_unprepare(sspi->clk);
573 clk_put(sspi->clk); 700 clk_put(sspi->clk);
701free_tx_dma:
702 dma_release_channel(sspi->tx_chan);
703free_rx_dma:
704 dma_release_channel(sspi->rx_chan);
574free_master: 705free_master:
575 spi_master_put(master); 706 spi_master_put(master);
576err_cs: 707err_cs:
@@ -591,8 +722,11 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
591 if (sspi->chipselect[i] > 0) 722 if (sspi->chipselect[i] > 0)
592 gpio_free(sspi->chipselect[i]); 723 gpio_free(sspi->chipselect[i]);
593 } 724 }
725 kfree(sspi->dummypage);
594 clk_disable_unprepare(sspi->clk); 726 clk_disable_unprepare(sspi->clk);
595 clk_put(sspi->clk); 727 clk_put(sspi->clk);
728 dma_release_channel(sspi->rx_chan);
729 dma_release_channel(sspi->tx_chan);
596 spi_master_put(master); 730 spi_master_put(master);
597 return 0; 731 return 0;
598} 732}