author     Dan Williams <dan.j.williams@intel.com>   2010-03-03 23:21:13 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2010-03-03 23:21:13 -0500
commit     aa4d72ae946a4fa40486b871717778734184fa29 (patch)
tree       5c98641f00a7866e28a364861b9af9b6df606fdd /drivers/dma/ioat/dma_v2.c
parent     b9cc98697d1ca35a86bbb708acc6d93993c28f0f (diff)
ioat: cleanup ->timer_fn() and ->cleanup_fn() prototypes
If the calling conventions of ->timer_fn() and ->cleanup_fn() are unified across hardware versions, we can drop parameters from ioat_init_channel() and unify the ioat_is_dma_complete() implementations.

Both ->timer_fn() and ->cleanup_fn() are modified to expect a struct dma_chan pointer.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
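For clarity, here is a sketch of the shared completion helper this patch switches dma_v2.c over to. The body is inferred from the ioat2_is_complete() implementation removed below; ioat_is_dma_complete() itself lives outside this file, so its exact location and the use of to_chan_common()/chan->device here are assumptions, not part of this diff.

	/*
	 * Hedged sketch (not in this diff): shared helper assumed to replace
	 * the removed per-version ioat2_is_complete().  The cleanup callback
	 * now receives the generic struct dma_chan.
	 */
	enum dma_status
	ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
			     dma_cookie_t *done, dma_cookie_t *used)
	{
		struct ioat_chan_common *chan = to_chan_common(c);
		struct ioatdma_device *device = chan->device;

		if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
			return DMA_SUCCESS;

		/* kick the version-specific cleanup path, then re-check */
		device->cleanup_fn((unsigned long) c);

		return ioat_is_complete(c, cookie, done, used);
	}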
Diffstat (limited to 'drivers/dma/ioat/dma_v2.c')
-rw-r--r-- | drivers/dma/ioat/dma_v2.c | 34 ++++++++--------------------------
1 file changed, 8 insertions(+), 26 deletions(-)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 01ed1cfd3eb6..25a3c72b2941 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -199,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
 	ioat2_cleanup(ioat);
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -283,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 
 void ioat2_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	spin_lock_bh(&chan->cleanup_lock);
@@ -389,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;
 
-		ioat_init_channel(device, &ioat->base, i,
-				  device->timer_fn,
-				  device->cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
 		if (device->reset_hw(&ioat->base)) {
@@ -692,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
 
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 			spin_unlock_bh(&chan->cleanup_lock);
-			device->timer_fn((unsigned long) ioat);
+			device->timer_fn((unsigned long) &chan->common);
 		} else
 			spin_unlock_bh(&chan->cleanup_lock);
 		return -ENOMEM;
@@ -776,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
-	device->cleanup_tasklet((unsigned long) ioat);
+	device->cleanup_fn((unsigned long) c);
 	device->reset_hw(chan);
 
 	spin_lock_bh(&ioat->ring_lock);
@@ -809,21 +806,6 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	ioat->dmacount = 0;
 }
 
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		  dma_cookie_t *done, dma_cookie_t *used)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioatdma_device *device = ioat->base.device;
-
-	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
-		return DMA_SUCCESS;
-
-	device->cleanup_tasklet((unsigned long) ioat);
-
-	return ioat_is_complete(c, cookie, done, used);
-}
-
 static ssize_t ring_size_show(struct dma_chan *c, char *page)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -864,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 
 	device->enumerate_channels = ioat2_enumerate_channels;
 	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+	device->cleanup_fn = ioat2_cleanup_event;
 	device->timer_fn = ioat2_timer_event;
 	device->self_test = ioat_dma_self_test;
 	dma = &device->common;
@@ -872,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat2_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;
 
 	err = ioat_probe(device);
 	if (err)
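The enumerate_channels hunk above now calls ioat_init_channel(device, &ioat->base, i) with only three arguments. What follows is a hedged sketch of the trimmed helper (defined in dma.c, not shown in this diff); the assumption is that it now arms the timer and cleanup tasklet itself with the generic struct dma_chan as callback data, which is what allows the per-version callback arguments to be dropped here. Field names other than chan->timer, chan->cleanup_task, device->timer_fn and device->cleanup_fn (all visible in this diff) are assumptions.

	/* Sketch only -- not part of this diff, details assumed. */
	void ioat_init_channel(struct ioatdma_device *device,
			       struct ioat_chan_common *chan, int idx)
	{
		unsigned long data = (unsigned long) &chan->common;

		chan->device = device;
		chan->common.device = &device->common;
		list_add_tail(&chan->common.device_node, &device->common.channels);
		device->idx[idx] = chan;
		spin_lock_init(&chan->cleanup_lock);

		/* both callbacks now take the generic channel as their argument */
		init_timer(&chan->timer);
		chan->timer.function = device->timer_fn;
		chan->timer.data = data;
		tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
		tasklet_disable(&chan->cleanup_task);
	}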