author		Mark Brown <broonie@linaro.org>	2013-09-01 08:49:09 -0400
committer	Mark Brown <broonie@linaro.org>	2013-09-01 08:49:09 -0400
commit		2dc745b6ef6d72cec863e93554df9108061c6ffd (patch)
tree		95a75d61584dea85bdb867ee35d2317eccaa23e1 /drivers/spi
parent		121a39661b8424d4207c94d46b84c924724c47bd (diff)
parent		c12f964357fc6de54252a4134720083d687caa22 (diff)
Merge remote-tracking branch 'spi/topic/s3c64xx' into spi-next
Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/spi-s3c64xx.c	112
1 file changed, 43 insertions, 69 deletions
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index c5bc96123c1b..512b8893893b 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -172,7 +172,6 @@ struct s3c64xx_spi_port_config {
  * @master: Pointer to the SPI Protocol master.
  * @cntrlr_info: Platform specific data for the controller this driver manages.
  * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
- * @queue: To log SPI xfer requests.
  * @lock: Controller specific lock.
  * @state: Set of FLAGS to indicate status.
  * @rx_dmach: Controller's DMA channel for Rx.
@@ -193,7 +192,6 @@ struct s3c64xx_spi_driver_data {
 	struct spi_master *master;
 	struct s3c64xx_spi_info *cntrlr_info;
 	struct spi_device *tgl_spi;
-	struct list_head queue;
 	spinlock_t lock;
 	unsigned long sfr_start;
 	struct completion xfer_completion;
@@ -338,8 +336,10 @@ static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
 	req.cap = DMA_SLAVE;
 	req.client = &s3c64xx_spi_dma_client;
 
-	sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
-	sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
+	sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
+					sdd->rx_dma.dmach, &req, dev, "rx");
+	sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
+					sdd->tx_dma.dmach, &req, dev, "tx");
 
 	return 1;
 }
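The cast changes here because sdd->ops->request() returns an integer channel token while the ch fields are pointer-typed; converting an int straight to a pointer of a different width draws a compiler warning, so the value is widened through unsigned long first. A minimal user-space sketch of that idiom, with hypothetical names (request_ch(); struct dma_chan is only an opaque type here), not taken from this driver:

#include <stdio.h>

struct dma_chan;			/* opaque in this sketch */

static unsigned int request_ch(void)
{
	return 42;			/* pretend this is a channel token */
}

int main(void)
{
	struct dma_chan *ch;

	/* Widening to unsigned long first avoids the "cast to pointer from
	 * integer of different size" warning on 64-bit builds. */
	ch = (struct dma_chan *)(unsigned long)request_ch();
	printf("ch = %p\n", (void *)ch);
	return 0;
}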
@@ -386,9 +386,10 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
-	struct scatterlist sg;
 	struct dma_async_tx_descriptor *desc;
 
+	memset(&config, 0, sizeof(config));
+
 	if (dma->direction == DMA_DEV_TO_MEM) {
 		sdd = container_of((void *)dma,
 					struct s3c64xx_spi_driver_data, rx_dma);
@@ -407,14 +408,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	sg_init_table(&sg, 1);
-	sg_dma_len(&sg) = len;
-	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
-			len, offset_in_page(buf));
-	sg_dma_address(&sg) = buf;
-
-	desc = dmaengine_prep_slave_sg(dma->ch,
-		&sg, 1, dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
+					dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
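This hunk drops the hand-built single-entry scatterlist and the dmaengine_prep_slave_sg() call in favour of dmaengine_prep_slave_single(), which takes the mapped buffer directly. A minimal kernel-style sketch of that pattern, assuming a hypothetical my_start_dma()/my_dma_done() and an already DMA-mapped buffer:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static void my_dma_done(void *param)
{
	/* transfer-complete handling would go here */
}

static int my_start_dma(struct dma_chan *ch, dma_addr_t buf, size_t len,
			enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;

	/* One contiguous, already-mapped buffer: no scatterlist needed. */
	desc = dmaengine_prep_slave_single(ch, buf, len, dir,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_dma_done;
	desc->callback_param = NULL;

	dmaengine_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(ch);		/* start the channel */
	return 0;
}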
@@ -431,27 +426,26 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 	dma_cap_mask_t mask;
 	int ret;
 
-	if (is_polling(sdd))
-		return 0;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	/* Acquire DMA channels */
-	sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-				(void*)sdd->rx_dma.dmach, dev, "rx");
-	if (!sdd->rx_dma.ch) {
-		dev_err(dev, "Failed to get RX DMA channel\n");
-		ret = -EBUSY;
-		goto out;
-	}
+	if (!is_polling(sdd)) {
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		/* Acquire DMA channels */
+		sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+				(void *)sdd->rx_dma.dmach, dev, "rx");
+		if (!sdd->rx_dma.ch) {
+			dev_err(dev, "Failed to get RX DMA channel\n");
+			ret = -EBUSY;
+			goto out;
+		}
 
-	sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-				(void*)sdd->tx_dma.dmach, dev, "tx");
-	if (!sdd->tx_dma.ch) {
-		dev_err(dev, "Failed to get TX DMA channel\n");
-		ret = -EBUSY;
-		goto out_rx;
+		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+				(void *)sdd->tx_dma.dmach, dev, "tx");
+		if (!sdd->tx_dma.ch) {
+			dev_err(dev, "Failed to get TX DMA channel\n");
+			ret = -EBUSY;
+			goto out_rx;
+		}
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
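With this change prepare_transfer() only requests DMA channels when the controller is not in polling mode. A minimal sketch of the dma_request_slave_channel_compat() pattern used above (DT dma-names lookup with a legacy filter-function fallback); my_filter() and my_request_channels() are hypothetical names, not from this driver:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	return true;	/* accept any channel in this sketch */
}

static int my_request_channels(struct device *dev, struct dma_chan **rx,
			       struct dma_chan **tx, void *rx_p, void *tx_p)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	*rx = dma_request_slave_channel_compat(mask, my_filter, rx_p, dev, "rx");
	if (!*rx)
		return -EBUSY;

	*tx = dma_request_slave_channel_compat(mask, my_filter, tx_p, dev, "tx");
	if (!*tx) {
		dma_release_channel(*rx);	/* undo on failure */
		return -EBUSY;
	}
	return 0;
}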
@@ -1053,8 +1047,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 	struct s3c64xx_spi_driver_data *sdd;
 	struct s3c64xx_spi_info *sci;
-	struct spi_message *msg;
-	unsigned long flags;
 	int err;
 
 	sdd = spi_master_get_devdata(spi->master);
@@ -1068,37 +1060,23 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 		return -ENODEV;
 	}
 
-	/* Request gpio only if cs line is asserted by gpio pins */
-	if (sdd->cs_gpio) {
-		err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
-				       dev_name(&spi->dev));
-		if (err) {
-			dev_err(&spi->dev,
-				"Failed to get /CS gpio [%d]: %d\n",
-				cs->line, err);
-			goto err_gpio_req;
-		}
-	}
-
-	if (!spi_get_ctldata(spi))
-		spi_set_ctldata(spi, cs);
-
-	sci = sdd->cntrlr_info;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	list_for_each_entry(msg, &sdd->queue, queue) {
-		/* Is some mssg is already queued for this device */
-		if (msg->spi == spi) {
-			dev_err(&spi->dev,
-				"setup: attempt while mssg in queue!\n");
-			spin_unlock_irqrestore(&sdd->lock, flags);
-			err = -EBUSY;
-			goto err_msgq;
-		}
-	}
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
+	if (!spi_get_ctldata(spi)) {
+		/* Request gpio only if cs line is asserted by gpio pins */
+		if (sdd->cs_gpio) {
+			err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
+					dev_name(&spi->dev));
+			if (err) {
+				dev_err(&spi->dev,
+					"Failed to get /CS gpio [%d]: %d\n",
+					cs->line, err);
+				goto err_gpio_req;
+			}
+		}
+
+		spi_set_ctldata(spi, cs);
+	}
 
+	sci = sdd->cntrlr_info;
 
 	pm_runtime_get_sync(&sdd->pdev->dev);
 
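The removed block was the old scan of sdd->queue; with the queue gone, the CS GPIO request is now guarded by spi_get_ctldata() so it only happens on the first setup() call for a device. A minimal sketch of that guard, with a hypothetical my_setup() and an opaque struct my_cs standing in for the driver's chip-select data:

#include <linux/spi/spi.h>
#include <linux/gpio.h>

struct my_cs;				/* opaque per-device state */

static int my_setup(struct spi_device *spi, struct my_cs *cs, unsigned int gpio)
{
	int err;

	/* setup() may be called repeatedly; only initialise once per device. */
	if (!spi_get_ctldata(spi)) {
		err = gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH,
				       dev_name(&spi->dev));
		if (err)
			return err;
		spi_set_ctldata(spi, cs);	/* mark as initialised */
	}
	return 0;
}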
@@ -1146,7 +1124,6 @@ setup_exit:
 	/* setup() returns with device de-selected */
 	disable_cs(sdd, spi);
 
-err_msgq:
 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);
 
@@ -1361,16 +1338,14 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	if (!sdd->pdev->dev.of_node) {
 		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 		if (!res) {
-			dev_warn(&pdev->dev, "Unable to get SPI tx dma "
-					"resource. Switching to poll mode\n");
+			dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
 			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
 		} else
 			sdd->tx_dma.dmach = res->start;
 
 		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
 		if (!res) {
-			dev_warn(&pdev->dev, "Unable to get SPI rx dma "
-					"resource. Switching to poll mode\n");
+			dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
 			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
 		} else
 			sdd->rx_dma.dmach = res->start;
@@ -1440,7 +1415,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 
 	spin_lock_init(&sdd->lock);
 	init_completion(&sdd->xfer_completion);
-	INIT_LIST_HEAD(&sdd->queue);
 
 	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
 				"spi-s3c64xx", sdd);
@@ -1462,8 +1436,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 
 	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
 					sdd->port_id, master->num_chipselect);
-	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
-					mem_res->end, mem_res->start,
+	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
+			mem_res,
 					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
 	pm_runtime_enable(&pdev->dev);
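The final hunk replaces the hand-rolled "[0x%x-0x%x]" IOmem print (which passed mem_res->end before mem_res->start) with the %pR printk extension, which formats a struct resource by itself. A minimal sketch, with a hypothetical my_show_region():

#include <linux/ioport.h>
#include <linux/printk.h>

static void my_show_region(const struct resource *res)
{
	/* %pR prints the resource range and type, e.g. [mem 0x...-0x...] */
	pr_debug("IOmem=[%pR]\n", res);
}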