 drivers/rapidio/devices/rio_mport_cdev.c | 122 +++++---------------------------
 1 file changed, 18 insertions(+), 104 deletions(-)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index cfb54e01d758..9d27016c899e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct dma_chan *dmach;
 	struct list_head async_list;
-	struct list_head pend_list;
 	spinlock_t req_lock;
 	struct mutex dma_lock;
 	struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+	struct kref refcount;
 	struct list_head node;
 	struct file *filp;
 	struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
 	struct completion req_comp;
 };
 
-struct mport_faf_work {
-	struct work_struct work;
-	struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
 	struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
 	complete(&priv->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+			refcount);
 	struct mport_cdev_priv *priv = req->priv;
 	unsigned int i;
 
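
The hunk above turns dma_req_free() into a kref release callback: it now receives the embedded struct kref, recovers the request with container_of(), and is only ever reached through kref_put(). A minimal sketch of that pattern follows; only the refcount field and the function name mirror the driver, the rest of the struct is stripped down for illustration.

#include <linux/kref.h>
#include <linux/slab.h>

struct mport_dma_req {
	struct kref refcount;	/* embedded reference count */
	/* ... transfer state elided ... */
};

/* Called by kref_put() when the last reference is dropped. */
static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req =
		container_of(ref, struct mport_dma_req, refcount);

	/* tear down transfer state here, then release the request */
	kfree(req);
}

Every holder then drops its reference with kref_put(&req->refcount, dma_req_free) instead of freeing the request directly, so the structure is released exactly once, by whichever user finishes last.
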
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
 	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
 					       NULL, NULL);
 	complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-	struct mport_faf_work *work = container_of(_work,
-				struct mport_faf_work, work);
-	struct mport_dma_req *req = work->req;
-
-	dma_req_free(req);
-	kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-	struct mport_dma_req *req = (struct mport_dma_req *)param;
-	struct mport_faf_work *work;
-
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work)
-		return;
-
-	INIT_WORK(&work->work, dma_faf_cleanup);
-	work->req = req;
-	queue_work(dma_wq, &work->work);
+	kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
 		goto err_out;
 	}
 
-	if (sync == RIO_TRANSFER_FAF)
-		tx->callback = dma_faf_callback;
-	else
-		tx->callback = dma_xfer_callback;
+	tx->callback = dma_xfer_callback;
 	tx->callback_param = req;
 
 	req->dmach = chan;
 	req->sync = sync;
 	req->status = DMA_IN_PROGRESS;
 	init_completion(&req->req_comp);
+	kref_get(&req->refcount);
 
 	cookie = dmaengine_submit(tx);
 	req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
 	if (dma_submit_error(cookie)) {
 		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
 			   cookie, xfer->rio_addr, xfer->length);
+		kref_put(&req->refcount, dma_req_free);
 		ret = -EIO;
 		goto err_out;
 	}
@@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	if (!req)
 		return -ENOMEM;
 
+	kref_init(&req->refcount);
+
 	ret = get_dma_channel(priv);
 	if (ret) {
 		kfree(req);
@@ -968,42 +941,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	ret = do_dma_request(req, xfer, sync, nents);
 
 	if (ret >= 0) {
-		if (sync == RIO_TRANSFER_SYNC)
-			goto sync_out;
-		return ret; /* return ASYNC cookie */
-	}
-
-	if (ret == -ETIMEDOUT || ret == -EINTR) {
-		/*
-		 * This can happen only in case of SYNC transfer.
-		 * Do not free unfinished request structure immediately.
-		 * Place it into pending list and deal with it later
-		 */
-		spin_lock(&priv->req_lock);
-		list_add_tail(&req->node, &priv->pend_list);
-		spin_unlock(&priv->req_lock);
-		return ret;
+		if (sync == RIO_TRANSFER_ASYNC)
+			return ret; /* return ASYNC cookie */
+	} else {
+		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
 	}
 
-
-	rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
-sync_out:
-	dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
-	sg_free_table(&req->sgt);
 err_pg:
-	if (page_list) {
+	if (!req->page_list) {
 		for (i = 0; i < nr_pages; i++)
 			put_page(page_list[i]);
 		kfree(page_list);
 	}
 err_req:
-	if (req->map) {
-		mutex_lock(&md->buf_mutex);
-		kref_put(&req->map->ref, mport_release_mapping);
-		mutex_unlock(&md->buf_mutex);
-	}
-	put_dma_channel(priv);
-	kfree(req);
+	kref_put(&req->refcount, dma_req_free);
 	return ret;
 }
 
@@ -1121,7 +1072,7 @@ static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
 	ret = 0;
 
 	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
-		dma_req_free(req);
+		kref_put(&req->refcount, dma_req_free);
 
 	return ret;
 
@@ -1966,7 +1917,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	INIT_LIST_HEAD(&priv->async_list);
-	INIT_LIST_HEAD(&priv->pend_list);
 	spin_lock_init(&priv->req_lock);
 	mutex_init(&priv->dma_lock);
 #endif
@@ -2006,8 +1956,6 @@ static void mport_cdev_release_dma(struct file *filp)
 
 	md = priv->md;
 
-	flush_workqueue(dma_wq);
-
 	spin_lock(&priv->req_lock);
 	if (!list_empty(&priv->async_list)) {
 		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
@@ -2023,20 +1971,7 @@ static void mport_cdev_release_dma(struct file *filp)
 				   req->filp, req->cookie,
 				   completion_done(&req->req_comp)?"yes":"no");
 			list_del(&req->node);
-			dma_req_free(req);
-		}
-	}
-
-	if (!list_empty(&priv->pend_list)) {
-		rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
-			   filp, current->comm, task_pid_nr(current));
-		list_for_each_entry_safe(req,
-					 req_next, &priv->pend_list, node) {
-			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
-				   req->filp, req->cookie,
-				   completion_done(&req->req_comp)?"yes":"no");
-			list_del(&req->node);
-			dma_req_free(req);
+			kref_put(&req->refcount, dma_req_free);
 		}
 	}
 
@@ -2048,15 +1983,6 @@ static void mport_cdev_release_dma(struct file *filp)
 			   current->comm, task_pid_nr(current), wret);
 	}
 
-	spin_lock(&priv->req_lock);
-
-	if (!list_empty(&priv->pend_list)) {
-		rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
-			   filp, current->comm, task_pid_nr(current));
-	}
-
-	spin_unlock(&priv->req_lock);
-
 	if (priv->dmach != priv->md->dma_chan) {
 		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
 			   filp, current->comm, task_pid_nr(current));
@@ -2573,8 +2499,6 @@ static void mport_cdev_remove(struct mport_dev *md)
 	cdev_device_del(&md->cdev, &md->dev);
 	mport_cdev_kill_fasync(md);
 
-	flush_workqueue(dma_wq);
-
 	/* TODO: do we need to give clients some time to close file
 	 * descriptors? Simple wait for XX, or kref?
 	 */
@@ -2691,17 +2615,8 @@ static int __init mport_init(void)
 		goto err_cli;
 	}
 
-	dma_wq = create_singlethread_workqueue("dma_wq");
-	if (!dma_wq) {
-		rmcd_error("failed to create DMA work queue");
-		ret = -ENOMEM;
-		goto err_wq;
-	}
-
 	return 0;
 
-err_wq:
-	class_interface_unregister(&rio_mport_interface);
 err_cli:
 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
 err_chr:
@@ -2717,7 +2632,6 @@ static void __exit mport_exit(void)
 	class_interface_unregister(&rio_mport_interface);
 	class_destroy(dev_class);
 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
-	destroy_workqueue(dma_wq);
 }
 
 module_init(mport_init);
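
Taken together, the patch drops the dma_wq workqueue, the fire-and-forget (FAF) cleanup work items and the per-file pend_list, and ties the lifetime of each mport_dma_req to a reference count instead. A condensed, hedged sketch of the resulting flow (names mirror the driver, but the sequence is abbreviated and the error labels are omitted):

	/* rio_dma_transfer(): submitter's reference */
	kref_init(&req->refcount);

	/* do_dma_request(): reference handed to the completion callback */
	kref_get(&req->refcount);
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		kref_put(&req->refcount, dma_req_free); /* callback never runs */

	/* dma_xfer_callback(): drop the callback's reference */
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);

	/* common exit path, async-wait ioctl and file release: drop the
	 * submitter's reference once the request is no longer tracked */
	kref_put(&req->refcount, dma_req_free);

Because the request is only freed by the last kref_put(), a SYNC transfer that returns -ETIMEDOUT or -EINTR no longer needs to be parked on pend_list: the callback's reference keeps the structure alive until the DMA engine is actually finished with it.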