author:    Ira Snyder <iws@ovro.caltech.edu>        2010-01-06 08:34:01 -0500
committer: Dan Williams <dan.j.williams@intel.com>  2010-02-02 16:51:41 -0500
commit:    a4f56d4b103d4e5d1a59a9118db0185a6bd1a83b
tree:      aa00d6faf06d168e57c090f1eb05b16596b9a299  /drivers/dma
parent:    4ce0e953f6286777452bf07c83056342d6b9b257
fsldma: rename struct fsl_dma_chan to struct fsldma_chan
This is the beginning of a cleanup which will change all instances of
"fsl_dma" to "fsldma" to match the name of the driver itself.
Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
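
The rename itself is mechanical. As a rough sketch only (not part of this commit; the file list and the set of patterns are assumed from the diffstat below), a first pass could be scripted along these lines and then reviewed by hand:

    # Hypothetical helper, not from the kernel tree: apply the struct renames
    # made by this patch across the two fsldma source files.
    # The \b word boundaries keep "struct fsl_dma_chan" from also matching
    # longer identifiers such as "struct fsl_dma_chan_regs".
    import re
    from pathlib import Path

    RENAMES = {
        r"\bstruct fsl_dma_chan_regs\b": "struct fsldma_chan_regs",
        r"\bstruct fsl_dma_chan\b": "struct fsldma_chan",
        r"\bstruct fsl_dma_device\b": "struct fsldma_device",
    }

    for path in (Path("drivers/dma/fsldma.c"), Path("drivers/dma/fsldma.h")):
        text = path.read_text()
        for pattern, replacement in RENAMES.items():
            text = re.sub(pattern, replacement, text)
        path.write_text(text)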
Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/fsldma.c | 128
-rw-r--r--   drivers/dma/fsldma.h |  26
2 files changed, 81 insertions, 73 deletions
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b4e6383f480..6795d96e3629 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,7 +37,7 @@
 #include <asm/fsldma.h>
 #include "fsldma.h"
 
-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *fsl_chan)
 {
 	/* Reset the channel */
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
@@ -64,23 +64,23 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 
 }
 
-static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
 }
 
-static u32 get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
 }
 
-static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *fsl_chan,
 			struct fsl_dma_ld_hw *hw, u32 count)
 {
 	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
 }
 
-static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *fsl_chan,
 			struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
 	u64 snoop_bits;
@@ -90,7 +90,7 @@ static void set_desc_src(struct fsl_dma_chan *fsl_chan,
 	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
 }
 
-static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
+static void set_desc_dest(struct fsldma_chan *fsl_chan,
 			struct fsl_dma_ld_hw *hw, dma_addr_t dest)
 {
 	u64 snoop_bits;
@@ -100,7 +100,7 @@ static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
 	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
 }
 
-static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *fsl_chan,
 			struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
 	u64 snoop_bits;
@@ -110,38 +110,38 @@ static void set_desc_next(struct fsl_dma_chan *fsl_chan,
 	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
 }
 
-static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
-static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
 }
 
-static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
 }
 
-static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
 }
 
-static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *fsl_chan)
 {
 	u32 sr = get_sr(fsl_chan);
 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
-static void dma_start(struct fsl_dma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *fsl_chan)
 {
 	u32 mode;
 
@@ -164,7 +164,7 @@ static void dma_start(struct fsl_dma_chan *fsl_chan)
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
-static void dma_halt(struct fsl_dma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *fsl_chan)
 {
 	u32 mode;
 	int i;
@@ -186,7 +186,7 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 	dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
 
-static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *fsl_chan,
 			struct fsl_desc_sw *desc)
 {
 	u64 snoop_bits;
@@ -199,7 +199,7 @@ static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
 			| snoop_bits, 64);
 }
 
-static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
+static void append_ld_queue(struct fsldma_chan *fsl_chan,
 			struct fsl_desc_sw *new_desc)
 {
 	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
@@ -231,7 +231,7 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
  * SA + 1 ... and so on.
  */
-static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -263,7 +263,7 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
-static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -296,7 +296,7 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
-static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -317,7 +317,7 @@ static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
 {
 	if (enable)
 		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
@@ -335,7 +335,7 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
 * transfer immediately. The DMA channel will wait for the
 * control pin asserted.
 */
-static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
 {
 	if (enable)
 		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
@@ -345,7 +345,7 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
@@ -379,7 +379,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 * Return - The descriptor allocated. NULL for failed.
 */
 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
-					struct fsl_dma_chan *fsl_chan)
+					struct fsldma_chan *fsl_chan)
 {
 	dma_addr_t pdesc;
 	struct fsl_desc_sw *desc_sw;
@@ -408,7 +408,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 */
 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 
 	/* Has this channel already been allocated? */
 	if (fsl_chan->desc_pool)
@@ -435,7 +435,7 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 */
 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
 
@@ -459,7 +459,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 static struct dma_async_tx_descriptor *
 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *new;
 
 	if (!chan)
@@ -489,7 +489,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
 	size_t len, unsigned long flags)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
 	struct list_head *list;
 	size_t copy;
@@ -575,7 +575,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
 	enum dma_data_direction direction, unsigned long flags)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
 	struct fsl_dma_slave *slave;
 	struct list_head *tx_list;
@@ -759,7 +759,7 @@ fail:
 
 static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *desc, *tmp;
 	unsigned long flags;
 
@@ -786,7 +786,7 @@ static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
-static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
 {
 	struct fsl_desc_sw *cur_desc, *desc;
 	dma_addr_t ld_phy;
@@ -820,7 +820,7 @@ static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
 * If 'in_intr' is set, the function will move the link descriptor to
 * the recycle list. Otherwise, free it directly.
 */
-static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
 {
 	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
@@ -864,7 +864,7 @@ static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
-static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
 {
 	struct list_head *ld_node;
 	dma_addr_t next_dest_addr;
@@ -912,7 +912,7 @@ out_unlock:
 */
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 
 #ifdef FSL_DMA_LD_DEBUG
 	struct fsl_desc_sw *ld;
@@ -949,7 +949,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 					dma_cookie_t *done,
 					dma_cookie_t *used)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 
@@ -969,7 +969,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 
 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 {
-	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+	struct fsldma_chan *fsl_chan = data;
 	u32 stat;
 	int update_cookie = 0;
 	int xfer_ld_q = 0;
@@ -1050,9 +1050,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 
 static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
 {
-	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
-	u32 gsr;
+	struct fsldma_device *fdev = data;
 	int ch_nr;
+	u32 gsr;
 
 	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
 		: in_le32(fdev->reg_base);
@@ -1064,19 +1064,23 @@ static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
 
 static void dma_do_tasklet(unsigned long data)
 {
-	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+	struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+/*----------------------------------------------------------------------------*/
+/* OpenFirmware Subsystem */
+/*----------------------------------------------------------------------------*/
+
+static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	struct device_node *node, u32 feature, const char *compatible)
 {
-	struct fsl_dma_chan *new_fsl_chan;
+	struct fsldma_chan *new_fsl_chan;
 	struct resource res;
 	int err;
 
 	/* alloc channel */
-	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
+	new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL);
 	if (!new_fsl_chan) {
 		dev_err(fdev->dev, "No free memory for allocating "
 				"dma channels!\n");
@@ -1167,7 +1171,7 @@ err_no_reg:
 	return err;
 }
 
-static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
+static void fsl_dma_chan_remove(struct fsldma_chan *fchan)
 {
 	if (fchan->irq != NO_IRQ)
 		free_irq(fchan->irq, fchan);
@@ -1176,15 +1180,15 @@ static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 	kfree(fchan);
 }
 
-static int __devinit of_fsl_dma_probe(struct of_device *dev,
+static int __devinit fsldma_of_probe(struct of_device *dev,
 			const struct of_device_id *match)
 {
 	int err;
-	struct fsl_dma_device *fdev;
+	struct fsldma_device *fdev;
 	struct device_node *child;
 	struct resource res;
 
-	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
 	if (!fdev) {
 		dev_err(&dev->dev, "No enough memory for 'priv'\n");
 		return -ENOMEM;
@@ -1256,9 +1260,9 @@ err_no_reg:
 	return err;
 }
 
-static int of_fsl_dma_remove(struct of_device *of_dev)
+static int fsldma_of_remove(struct of_device *of_dev)
 {
-	struct fsl_dma_device *fdev;
+	struct fsldma_device *fdev;
 	unsigned int i;
 
 	fdev = dev_get_drvdata(&of_dev->dev);
@@ -1280,39 +1284,43 @@ static int of_fsl_dma_remove(struct of_device *of_dev)
 	return 0;
 }
 
-static struct of_device_id of_fsl_dma_ids[] = {
+static struct of_device_id fsldma_of_ids[] = {
 	{ .compatible = "fsl,eloplus-dma", },
 	{ .compatible = "fsl,elo-dma", },
 	{}
 };
 
-static struct of_platform_driver of_fsl_dma_driver = {
+static struct of_platform_driver fsldma_of_driver = {
 	.name = "fsl-elo-dma",
-	.match_table = of_fsl_dma_ids,
-	.probe = of_fsl_dma_probe,
-	.remove = of_fsl_dma_remove,
+	.match_table = fsldma_of_ids,
+	.probe = fsldma_of_probe,
+	.remove = fsldma_of_remove,
 };
 
-static __init int of_fsl_dma_init(void)
+/*----------------------------------------------------------------------------*/
+/* Module Init / Exit */
+/*----------------------------------------------------------------------------*/
+
+static __init int fsldma_init(void)
 {
 	int ret;
 
 	pr_info("Freescale Elo / Elo Plus DMA driver\n");
 
-	ret = of_register_platform_driver(&of_fsl_dma_driver);
+	ret = of_register_platform_driver(&fsldma_of_driver);
 	if (ret)
 		pr_err("fsldma: failed to register platform driver\n");
 
 	return ret;
 }
 
-static void __exit of_fsl_dma_exit(void)
+static void __exit fsldma_exit(void)
 {
-	of_unregister_platform_driver(&of_fsl_dma_driver);
+	of_unregister_platform_driver(&fsldma_of_driver);
 }
 
-subsys_initcall(of_fsl_dma_init);
-module_exit(of_fsl_dma_exit);
+subsys_initcall(fsldma_init);
+module_exit(fsldma_exit);
 
 MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index dbb5b5cce4c2..f8c2baa6f41e 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -94,7 +94,7 @@ struct fsl_desc_sw {
 	struct dma_async_tx_descriptor async_tx;
 } __attribute__((aligned(32)));
 
-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
 	u32 mr;	/* 0x00 - Mode Register */
 	u32 sr;	/* 0x04 - Status Register */
 	u64 cdar;	/* 0x08 - Current descriptor address register */
@@ -104,19 +104,19 @@ struct fsl_dma_chan_regs {
 	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
 };
 
-struct fsl_dma_chan;
+struct fsldma_chan;
 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4
 
-struct fsl_dma_device {
+struct fsldma_device {
 	void __iomem *reg_base;	/* DGSR register base */
 	struct device *dev;
 	struct dma_device common;
-	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;	/* The same as DMA channels */
 	int irq;	/* Channel IRQ */
 };
 
-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
 #define FSL_DMA_LITTLE_ENDIAN	0x00000000
 #define FSL_DMA_BIG_ENDIAN	0x00000001
 
@@ -127,8 +127,8 @@ struct fsl_dma_device {
 #define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
 #define FSL_DMA_CHAN_START_EXT	0x00002000
 
-struct fsl_dma_chan {
-	struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+	struct fsldma_chan_regs __iomem *reg_base;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;	/* Descriptor operation lock */
 	struct list_head ld_queue;	/* Link descriptors queue */
@@ -140,14 +140,14 @@ struct fsl_dma_chan {
 	struct tasklet_struct tasklet;
 	u32 feature;
 
-	void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_dest_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
 };
 
-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
 