author     Shannon Nelson <shannon.nelson@intel.com>            2007-12-17 19:20:08 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-12-17 22:28:17 -0500
commit     711924b1052a280bd2452c3babb9816e4a77c723 (patch)
tree       7eea7de34b1f6bdd37989cee9916ee5f5f4e3093 /drivers/dma/ioat_dma.c
parent     7c9e70efbfc3186674d93451e0fbf18365347b4d (diff)
I/OAT: fixups from code comments
A few fixups from Andrew's code comments.
- removed "static inline" forward-declares
- changed use of min() to min_t()
- removed some unnecessary NULL initializations
- replaced a couple of BUG() calls with error handling
Fixes this:
drivers/dma/ioat_dma.c: In function `ioat1_tx_submit':
drivers/dma/ioat_dma.c:177: sorry, unimplemented: inlining failed in call to '__ioat1_dma_memcpy_issue_pending': function body not available
drivers/dma/ioat_dma.c:268: sorry, unimplemented: called from here
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Cc: "Williams, Dan J" <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
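
The build error quoted above comes from calling a static inline function whose body has not been seen yet: some older gcc versions (and kernel configurations where "inline" effectively means __always_inline) refuse to inline across a bare forward declaration, which is the "function body not available" failure in the changelog. The patch therefore drops the forward declarations and moves the full definitions of __ioat1_dma_memcpy_issue_pending() and __ioat2_dma_memcpy_issue_pending() above their first caller, ioat1_tx_submit(). The standalone sketch below reproduces the shape of the problem and of the fix; the names (issue_pending_old, submit_old, and so on) are invented for illustration and are not part of the driver:

/* Problem shape: forward declaration first, body only later in the file. */
static inline void issue_pending_old(int *pending);

static void submit_old(int *pending)
{
        (*pending)++;
        issue_pending_old(pending);     /* body not seen yet; old gcc may refuse to inline */
}

static inline void issue_pending_old(int *pending)     /* definition appears after the caller */
{
        *pending = 0;
}

/* Fixed shape, as in this patch: full definition before the first caller. */
static inline void issue_pending_new(int *pending)
{
        *pending = 0;
}

static void submit_new(int *pending)
{
        (*pending)++;
        issue_pending_new(pending);
}

int main(void)
{
        int p = 0;

        submit_old(&p);
        submit_new(&p);
        return p;
}

Once the body is visible before every call site, the compiler can inline it regardless of how it treats forward-declared inlines.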
Diffstat (limited to 'drivers/dma/ioat_dma.c')
-rw-r--r--  drivers/dma/ioat_dma.c  142
1 file changed, 77 insertions, 65 deletions
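
On the min() -> min_t() change in the diff below: the dmaengine prep_memcpy path gets its length as a size_t, while the old code cast it to u32 before comparing it with ioat_chan->xfercap. The kernel's min() insists that both arguments have the same type, so the cast was needed to compile cleanly, but on 64-bit it silently truncates lengths above 4 GiB; min_t(size_t, len, ioat_chan->xfercap) keeps the comparison in the wider type instead. A minimal userspace sketch of the difference, assuming xfercap is a 32-bit quantity as the old (u32) cast suggests (the min_t stand-in here is simplified and, unlike the kernel macro, evaluates its arguments more than once):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's min_t() */
#define min_t(type, a, b)  ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        size_t len = 0x100000004ULL;    /* > 4 GiB request, 64-bit host assumed */
        uint32_t xfercap = 0x200000;    /* per-descriptor transfer cap */

        /* old shape: the (u32) cast silently truncates len to 4 */
        uint32_t old_copy = (uint32_t)len < xfercap ? (uint32_t)len : xfercap;

        /* patched shape: compare in the wider type, no truncation */
        size_t new_copy = min_t(size_t, len, xfercap);

        printf("old: %u bytes, new: %zu bytes\n", old_copy, new_copy);
        return 0;
}

The old expression picks 4 bytes for a 4 GiB + 4 byte request; the patched one correctly picks the 2 MiB transfer cap.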
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index c1c2dcc6fc2e..c17ec3276062 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,10 +173,47 @@ static void ioat_set_dest(dma_addr_t addr,
         tx_to_ioat_desc(tx)->dst = addr;
 }
 
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ * descriptors to hw
+ * @chan: DMA channel handle
+ */
 static inline void __ioat1_dma_memcpy_issue_pending(
-                        struct ioat_dma_chan *ioat_chan);
+                        struct ioat_dma_chan *ioat_chan)
+{
+        ioat_chan->pending = 0;
+        writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (ioat_chan->pending != 0) {
+                spin_lock_bh(&ioat_chan->desc_lock);
+                __ioat1_dma_memcpy_issue_pending(ioat_chan);
+                spin_unlock_bh(&ioat_chan->desc_lock);
+        }
+}
+
 static inline void __ioat2_dma_memcpy_issue_pending(
-                        struct ioat_dma_chan *ioat_chan);
+                        struct ioat_dma_chan *ioat_chan)
+{
+        ioat_chan->pending = 0;
+        writew(ioat_chan->dmacount,
+               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (ioat_chan->pending != 0) {
+                spin_lock_bh(&ioat_chan->desc_lock);
+                __ioat2_dma_memcpy_issue_pending(ioat_chan);
+                spin_unlock_bh(&ioat_chan->desc_lock);
+        }
+}
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
@@ -203,7 +240,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
         prev = to_ioat_desc(ioat_chan->used_desc.prev);
         prefetch(prev->hw);
         do {
-                copy = min((u32) len, ioat_chan->xfercap);
+                copy = min_t(size_t, len, ioat_chan->xfercap);
 
                 new->async_tx.ack = 1;
 
@@ -291,10 +328,12 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
         orig_ack = first->async_tx.ack;
         new = first;
 
-        /* ioat_chan->desc_lock is still in force in version 2 path */
-
+        /*
+         * ioat_chan->desc_lock is still in force in version 2 path
+         * it gets unlocked at end of this function
+         */
         do {
-                copy = min((u32) len, ioat_chan->xfercap);
+                copy = min_t(size_t, len, ioat_chan->xfercap);
 
                 new->async_tx.ack = 1;
 
@@ -432,7 +471,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-        struct ioat_desc_sw *desc = NULL;
+        struct ioat_desc_sw *desc;
         u16 chanctrl;
         u32 chanerr;
         int i;
@@ -575,7 +614,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 static struct ioat_desc_sw *
 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-        struct ioat_desc_sw *new = NULL;
+        struct ioat_desc_sw *new;
 
         if (!list_empty(&ioat_chan->free_desc)) {
                 new = to_ioat_desc(ioat_chan->free_desc.next);
@@ -583,9 +622,11 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
         } else {
                 /* try to get another desc */
                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-                /* will this ever happen? */
-                /* TODO add upper limit on these */
-                BUG_ON(!new);
+                if (!new) {
+                        dev_err(&ioat_chan->device->pdev->dev,
+                                "alloc failed\n");
+                        return NULL;
+                }
         }
 
         prefetch(new->hw);
@@ -595,7 +636,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 static struct ioat_desc_sw *
 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-        struct ioat_desc_sw *new = NULL;
+        struct ioat_desc_sw *new;
 
         /*
          * used.prev points to where to start processing
@@ -609,8 +650,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
         if (ioat_chan->used_desc.prev &&
             ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
 
-                struct ioat_desc_sw *desc = NULL;
-                struct ioat_desc_sw *noop_desc = NULL;
+                struct ioat_desc_sw *desc;
+                struct ioat_desc_sw *noop_desc;
                 int i;
 
                 /* set up the noop descriptor */
@@ -624,10 +665,14 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
                 ioat_chan->pending++;
                 ioat_chan->dmacount++;
 
-                /* get a few more descriptors */
+                /* try to get a few more descriptors */
                 for (i = 16; i; i--) {
                         desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-                        BUG_ON(!desc);
+                        if (!desc) {
+                                dev_err(&ioat_chan->device->pdev->dev,
+                                        "alloc failed\n");
+                                break;
+                        }
                         list_add_tail(&desc->node, ioat_chan->used_desc.next);
 
                         desc->hw->next
@@ -677,10 +722,13 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
         spin_lock_bh(&ioat_chan->desc_lock);
         new = ioat_dma_get_next_descriptor(ioat_chan);
-        new->len = len;
         spin_unlock_bh(&ioat_chan->desc_lock);
 
-        return new ? &new->async_tx : NULL;
+        if (new) {
+                new->len = len;
+                return &new->async_tx;
+        } else
+                return NULL;
 }
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -693,53 +741,17 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
         spin_lock_bh(&ioat_chan->desc_lock);
         new = ioat2_dma_get_next_descriptor(ioat_chan);
-        new->len = len;
-
-        /* leave ioat_chan->desc_lock set in version 2 path */
-        return new ? &new->async_tx : NULL;
-}
-
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- * descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
-                        struct ioat_dma_chan *ioat_chan)
-{
-        ioat_chan->pending = 0;
-        writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-        if (ioat_chan->pending != 0) {
-                spin_lock_bh(&ioat_chan->desc_lock);
-                __ioat1_dma_memcpy_issue_pending(ioat_chan);
-                spin_unlock_bh(&ioat_chan->desc_lock);
-        }
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
-                        struct ioat_dma_chan *ioat_chan)
-{
-        ioat_chan->pending = 0;
-        writew(ioat_chan->dmacount,
-               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
 
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+        /*
+         * leave ioat_chan->desc_lock set in ioat 2 path
+         * it will get unlocked at end of tx_submit
+         */
 
-        if (ioat_chan->pending != 0) {
-                spin_lock_bh(&ioat_chan->desc_lock);
-                __ioat2_dma_memcpy_issue_pending(ioat_chan);
-                spin_unlock_bh(&ioat_chan->desc_lock);
-        }
+        if (new) {
+                new->len = len;
+                return &new->async_tx;
+        } else
+                return NULL;
 }
 
 static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -1019,7 +1031,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 static void ioat_dma_test_callback(void *dma_async_param)
 {
         printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
-                        dma_async_param);
+                dma_async_param);
 }
 
 /**
@@ -1032,7 +1044,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
         u8 *src;
         u8 *dest;
         struct dma_chan *dma_chan;
-        struct dma_async_tx_descriptor *tx = NULL;
+        struct dma_async_tx_descriptor *tx;
         dma_addr_t addr;
         dma_cookie_t cookie;
         int err = 0;
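
A closing note on the BUG() removals: both BUG_ON() sites in this file guarded GFP_ATOMIC descriptor allocations, where failure is a legitimate runtime condition rather than a driver bug, so the patch reports the failure with dev_err() and either returns NULL to the caller or stops topping up the descriptor ring, instead of halting the machine. A minimal userspace sketch of that pattern, with invented names (alloc_desc, get_next_descriptor) standing in for the driver helpers:

#include <stdio.h>
#include <stdlib.h>

struct desc {
        struct desc *next;
};

/* stand-in for ioat_dma_alloc_descriptor(..., GFP_ATOMIC) */
static struct desc *alloc_desc(void)
{
        return calloc(1, sizeof(struct desc));  /* may legitimately return NULL */
}

static struct desc *get_next_descriptor(void)
{
        struct desc *new = alloc_desc();

        /* old shape: BUG_ON(!new) turned this into a crash;
         * new shape: report it and hand NULL back to the caller */
        if (!new) {
                fprintf(stderr, "alloc failed\n");      /* dev_err() in the driver */
                return NULL;
        }
        return new;
}

int main(void)
{
        struct desc *d = get_next_descriptor();

        if (!d)
                return 1;
        free(d);
        return 0;
}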