Diffstat (limited to 'drivers/spi/amba-pl022.c')
-rw-r--r--   drivers/spi/amba-pl022.c   56
1 file changed, 26 insertions, 30 deletions
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index fb3d1b31772d..a2a5921c730a 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -253,11 +253,6 @@
 #define STATE_ERROR ((void *) -1)
 
 /*
- * Queue State
- */
-#define QUEUE_RUNNING (0)
-#define QUEUE_STOPPED (1)
-/*
  * SSP State - Whether Enabled or Disabled
  */
 #define SSP_DISABLED (0)
@@ -344,7 +339,7 @@ struct vendor_data {
  * @lock: spinlock to syncronise access to driver data
  * @workqueue: a workqueue on which any spi_message request is queued
  * @busy: workqueue is busy
- * @run: workqueue is running
+ * @running: workqueue is running
  * @pump_transfers: Tasklet used in Interrupt Transfer mode
  * @cur_msg: Pointer to current spi_message being processed
  * @cur_transfer: Pointer to current spi_transfer
@@ -369,8 +364,8 @@ struct pl022 {
        struct work_struct pump_messages;
        spinlock_t queue_lock;
        struct list_head queue;
-       int busy;
-       int run;
+       bool busy;
+       bool running;
        /* Message transfer pump */
        struct tasklet_struct pump_transfers;
        struct spi_message *cur_msg;
@@ -782,9 +777,9 @@ static void *next_transfer(struct pl022 *pl022)
 static void unmap_free_dma_scatter(struct pl022 *pl022)
 {
        /* Unmap and free the SG tables */
-       dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+       dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
                     pl022->sgt_tx.nents, DMA_TO_DEVICE);
-       dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+       dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
                     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
        sg_free_table(&pl022->sgt_rx);
        sg_free_table(&pl022->sgt_tx);
@@ -917,7 +912,7 @@ static int configure_dma(struct pl022 *pl022)
        };
        unsigned int pages;
        int ret;
-       int sglen;
+       int rx_sglen, tx_sglen;
        struct dma_chan *rxchan = pl022->dma_rx_channel;
        struct dma_chan *txchan = pl022->dma_tx_channel;
        struct dma_async_tx_descriptor *rxdesc;
@@ -956,7 +951,7 @@ static int configure_dma(struct pl022 *pl022)
                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        case WRITING_U32:
-               tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;;
+               tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        }
 
@@ -991,20 +986,20 @@ static int configure_dma(struct pl022 *pl022)
                        pl022->cur_transfer->len, &pl022->sgt_tx);
 
        /* Map DMA buffers */
-       sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+       rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
                           pl022->sgt_rx.nents, DMA_FROM_DEVICE);
-       if (!sglen)
+       if (!rx_sglen)
                goto err_rx_sgmap;
 
-       sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+       tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
                           pl022->sgt_tx.nents, DMA_TO_DEVICE);
-       if (!sglen)
+       if (!tx_sglen)
                goto err_tx_sgmap;
 
        /* Send both scatterlists */
        rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
                                      pl022->sgt_rx.sgl,
-                                     pl022->sgt_rx.nents,
+                                     rx_sglen,
                                      DMA_FROM_DEVICE,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
@@ -1012,7 +1007,7 @@ static int configure_dma(struct pl022 *pl022)
 
        txdesc = txchan->device->device_prep_slave_sg(txchan,
                                      pl022->sgt_tx.sgl,
-                                     pl022->sgt_tx.nents,
+                                     tx_sglen,
                                      DMA_TO_DEVICE,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
@@ -1040,10 +1035,10 @@ err_txdesc:
        txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
 err_rxdesc:
        rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
-       dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+       dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
                     pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
-       dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+       dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
                     pl022->sgt_tx.nents, DMA_FROM_DEVICE);
 err_rx_sgmap:
        sg_free_table(&pl022->sgt_tx);
@@ -1460,8 +1455,8 @@ static void pump_messages(struct work_struct *work)
 
        /* Lock queue and check for queue work */
        spin_lock_irqsave(&pl022->queue_lock, flags);
-       if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
-               pl022->busy = 0;
+       if (list_empty(&pl022->queue) || !pl022->running) {
+               pl022->busy = false;
                spin_unlock_irqrestore(&pl022->queue_lock, flags);
                return;
        }
@@ -1475,7 +1470,7 @@ static void pump_messages(struct work_struct *work)
                list_entry(pl022->queue.next, struct spi_message, queue);
 
        list_del_init(&pl022->cur_msg->queue);
-       pl022->busy = 1;
+       pl022->busy = true;
        spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
        /* Initial message state */
@@ -1507,8 +1502,8 @@ static int __init init_queue(struct pl022 *pl022)
        INIT_LIST_HEAD(&pl022->queue);
        spin_lock_init(&pl022->queue_lock);
 
-       pl022->run = QUEUE_STOPPED;
-       pl022->busy = 0;
+       pl022->running = false;
+       pl022->busy = false;
 
        tasklet_init(&pl022->pump_transfers,
                     pump_transfers, (unsigned long)pl022);
@@ -1529,12 +1524,12 @@ static int start_queue(struct pl022 *pl022)
 
        spin_lock_irqsave(&pl022->queue_lock, flags);
 
-       if (pl022->run == QUEUE_RUNNING || pl022->busy) {
+       if (pl022->running || pl022->busy) {
                spin_unlock_irqrestore(&pl022->queue_lock, flags);
                return -EBUSY;
        }
 
-       pl022->run = QUEUE_RUNNING;
+       pl022->running = true;
        pl022->cur_msg = NULL;
        pl022->cur_transfer = NULL;
        pl022->cur_chip = NULL;
@@ -1566,7 +1561,8 @@ static int stop_queue(struct pl022 *pl022)
 
        if (!list_empty(&pl022->queue) || pl022->busy)
                status = -EBUSY;
-       else pl022->run = QUEUE_STOPPED;
+       else
+               pl022->running = false;
 
        spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
@@ -1684,7 +1680,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
 
        spin_lock_irqsave(&pl022->queue_lock, flags);
 
-       if (pl022->run == QUEUE_STOPPED) {
+       if (!pl022->running) {
                spin_unlock_irqrestore(&pl022->queue_lock, flags);
                return -ESHUTDOWN;
        }
@@ -1693,7 +1689,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
        msg->state = STATE_START;
 
        list_add_tail(&msg->queue, &pl022->queue);
-       if (pl022->run == QUEUE_RUNNING && !pl022->busy)
+       if (pl022->running && !pl022->busy)
                queue_work(pl022->workqueue, &pl022->pump_messages);
 
        spin_unlock_irqrestore(&pl022->queue_lock, flags);