| author | Vinod Koul <vinod.koul@intel.com> | 2013-01-21 09:35:12 -0500 |
|---|---|---|
| committer | Vinod Koul <vinod.koul@intel.com> | 2013-01-21 10:09:34 -0500 |
| commit | 6c5e6a3990ce64192b56ffafa5ffa5af129751d5 (patch) | |
| tree | 228632cd25a3ce0e00194fb492eaa4c50e5acbae | |
| parent | 77bcc497c60ec62dbb84abc809a6e218d53409e9 (diff) | |
| parent | da2ac56a1bc9c6c56244aa9ca990d5c5c7574b5f (diff) | |
Merge tag 'ux500-dma40' of git://git.linaro.org/people/fabiobaltieri/linux.git
Pull ste_dma40 fixes from Fabio
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
| -rw-r--r-- | drivers/dma/ste_dma40.c | 489 |
| -rw-r--r-- | drivers/dma/ste_dma40_ll.c | 29 |
| -rw-r--r-- | drivers/dma/ste_dma40_ll.h | 130 |
| -rw-r--r-- | include/linux/platform_data/dma-ste-dma40.h | 13 |
4 files changed, 518 insertions, 143 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e5e60bb68d9d..ad860a221c33 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
| @@ -53,6 +53,8 @@ | |||
| 53 | #define D40_ALLOC_PHY (1 << 30) | 53 | #define D40_ALLOC_PHY (1 << 30) |
| 54 | #define D40_ALLOC_LOG_FREE 0 | 54 | #define D40_ALLOC_LOG_FREE 0 |
| 55 | 55 | ||
| 56 | #define MAX(a, b) (((a) < (b)) ? (b) : (a)) | ||
| 57 | |||
| 56 | /** | 58 | /** |
| 57 | * enum 40_command - The different commands and/or statuses. | 59 | * enum 40_command - The different commands and/or statuses. |
| 58 | * | 60 | * |
| @@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = { | |||
| 100 | 102 | ||
| 101 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) | 103 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) |
| 102 | 104 | ||
| 103 | /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ | 105 | /* |
| 104 | static u32 d40_backup_regs_v3[] = { | 106 | * since 9540 and 8540 have the same HW revision | ||
| 107 | * use v4a for 9540 or earlier | ||
| 108 | * use v4b for 8540 or later | ||
| 109 | * HW revision: | ||
| 110 | * DB8500ed has revision 0 | ||
| 111 | * DB8500v1 has revision 2 | ||
| 112 | * DB8500v2 has revision 3 | ||
| 113 | * AP9540v1 has revision 4 | ||
| 114 | * DB8540v1 has revision 4 | ||
| 115 | * TODO: Check if all these registers have to be saved/restored on dma40 v4a | ||
| 116 | */ | ||
| 117 | static u32 d40_backup_regs_v4a[] = { | ||
| 105 | D40_DREG_PSEG1, | 118 | D40_DREG_PSEG1, |
| 106 | D40_DREG_PSEG2, | 119 | D40_DREG_PSEG2, |
| 107 | D40_DREG_PSEG3, | 120 | D40_DREG_PSEG3, |
| @@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = { | |||
| 120 | D40_DREG_RCEG4, | 133 | D40_DREG_RCEG4, |
| 121 | }; | 134 | }; |
| 122 | 135 | ||
| 123 | #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) | 136 | #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a) |
| 137 | |||
| 138 | static u32 d40_backup_regs_v4b[] = { | ||
| 139 | D40_DREG_CPSEG1, | ||
| 140 | D40_DREG_CPSEG2, | ||
| 141 | D40_DREG_CPSEG3, | ||
| 142 | D40_DREG_CPSEG4, | ||
| 143 | D40_DREG_CPSEG5, | ||
| 144 | D40_DREG_CPCEG1, | ||
| 145 | D40_DREG_CPCEG2, | ||
| 146 | D40_DREG_CPCEG3, | ||
| 147 | D40_DREG_CPCEG4, | ||
| 148 | D40_DREG_CPCEG5, | ||
| 149 | D40_DREG_CRSEG1, | ||
| 150 | D40_DREG_CRSEG2, | ||
| 151 | D40_DREG_CRSEG3, | ||
| 152 | D40_DREG_CRSEG4, | ||
| 153 | D40_DREG_CRSEG5, | ||
| 154 | D40_DREG_CRCEG1, | ||
| 155 | D40_DREG_CRCEG2, | ||
| 156 | D40_DREG_CRCEG3, | ||
| 157 | D40_DREG_CRCEG4, | ||
| 158 | D40_DREG_CRCEG5, | ||
| 159 | }; | ||
| 160 | |||
| 161 | #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) | ||
| 124 | 162 | ||
| 125 | static u32 d40_backup_regs_chan[] = { | 163 | static u32 d40_backup_regs_chan[] = { |
| 126 | D40_CHAN_REG_SSCFG, | 164 | D40_CHAN_REG_SSCFG, |
| @@ -134,6 +172,102 @@ static u32 d40_backup_regs_chan[] = { | |||
| 134 | }; | 172 | }; |
| 135 | 173 | ||
| 136 | /** | 174 | /** |
| 175 | * struct d40_interrupt_lookup - lookup table for interrupt handler | ||
| 176 | * | ||
| 177 | * @src: Interrupt mask register. | ||
| 178 | * @clr: Interrupt clear register. | ||
| 179 | * @is_error: true if this is an error interrupt. | ||
| 180 | * @offset: start delta in the lookup_log_chans in d40_base. If equals to | ||
| 181 | * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. | ||
| 182 | */ | ||
| 183 | struct d40_interrupt_lookup { | ||
| 184 | u32 src; | ||
| 185 | u32 clr; | ||
| 186 | bool is_error; | ||
| 187 | int offset; | ||
| 188 | }; | ||
| 189 | |||
| 190 | |||
| 191 | static struct d40_interrupt_lookup il_v4a[] = { | ||
| 192 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, | ||
| 193 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | ||
| 194 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | ||
| 195 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | ||
| 196 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, | ||
| 197 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, | ||
| 198 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, | ||
| 199 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, | ||
| 200 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, | ||
| 201 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, | ||
| 202 | }; | ||
| 203 | |||
| 204 | static struct d40_interrupt_lookup il_v4b[] = { | ||
| 205 | {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0}, | ||
| 206 | {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32}, | ||
| 207 | {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64}, | ||
| 208 | {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96}, | ||
| 209 | {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128}, | ||
| 210 | {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0}, | ||
| 211 | {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32}, | ||
| 212 | {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64}, | ||
| 213 | {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96}, | ||
| 214 | {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128}, | ||
| 215 | {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN}, | ||
| 216 | {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN}, | ||
| 217 | }; | ||
| 218 | |||
| 219 | /** | ||
| 220 | * struct d40_reg_val - simple lookup struct | ||
| 221 | * | ||
| 222 | * @reg: The register. | ||
| 223 | * @val: The value that belongs to the register in reg. | ||
| 224 | */ | ||
| 225 | struct d40_reg_val { | ||
| 226 | unsigned int reg; | ||
| 227 | unsigned int val; | ||
| 228 | }; | ||
| 229 | |||
| 230 | static __initdata struct d40_reg_val dma_init_reg_v4a[] = { | ||
| 231 | /* Clock every part of the DMA block from start */ | ||
| 232 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | ||
| 233 | |||
| 234 | /* Interrupts on all logical channels */ | ||
| 235 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | ||
| 236 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | ||
| 237 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | ||
| 238 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | ||
| 239 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | ||
| 240 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | ||
| 241 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | ||
| 242 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | ||
| 243 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | ||
| 244 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | ||
| 245 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | ||
| 246 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | ||
| 247 | }; | ||
| 248 | static __initdata struct d40_reg_val dma_init_reg_v4b[] = { | ||
| 249 | /* Clock every part of the DMA block from start */ | ||
| 250 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | ||
| 251 | |||
| 252 | /* Interrupts on all logical channels */ | ||
| 253 | { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF}, | ||
| 254 | { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF}, | ||
| 255 | { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF}, | ||
| 256 | { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF}, | ||
| 257 | { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF}, | ||
| 258 | { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF}, | ||
| 259 | { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF}, | ||
| 260 | { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF}, | ||
| 261 | { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF}, | ||
| 262 | { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF}, | ||
| 263 | { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF}, | ||
| 264 | { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF}, | ||
| 265 | { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF}, | ||
| 266 | { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF}, | ||
| 267 | { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF} | ||
| 268 | }; | ||
| 269 | |||
| 270 | /** | ||
| 137 | * struct d40_lli_pool - Structure for keeping LLIs in memory | 271 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
| 138 | * | 272 | * |
| 139 | * @base: Pointer to memory area when the pre_alloc_lli's are not large | 273 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
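
The hunk above moves the interrupt lookup tables and the one-shot init writes into per-variant arrays (il_v4a/il_v4b, dma_init_reg_v4a/dma_init_reg_v4b) instead of hard-coding the v4a register names in the handlers. A minimal standalone sketch of the same table-driven init pattern, with a plain array standing in for the ioremapped register window and all offsets and values chosen for illustration only:

```c
#include <stdint.h>
#include <stdio.h>

struct reg_val {
	unsigned int reg;
	unsigned int val;
};

/* Stand-in for the ioremapped register window (1 KiB of 32-bit registers). */
static uint32_t fake_regs[0x400 / 4];

static void write_reg(unsigned int offset, uint32_t val)
{
	fake_regs[offset / 4] = val;   /* writel(val, virtbase + offset) in the driver */
}

int main(void)
{
	/* Variant-specific init table, analogous to dma_init_reg_v4a/v4b. */
	static const struct reg_val init_tbl[] = {
		{ .reg = 0x000, .val = 0x3ff01    },   /* ungate clocks */
		{ .reg = 0x080, .val = 0xFFFFFFFF },   /* unmask channel interrupts */
		{ .reg = 0x090, .val = 0xFFFFFFFF },   /* clear anything pending */
	};

	for (size_t i = 0; i < sizeof(init_tbl) / sizeof(init_tbl[0]); i++)
		write_reg(init_tbl[i].reg, init_tbl[i].val);

	printf("reg 0x000 now holds 0x%x\n", (unsigned int)fake_regs[0]);
	return 0;
}
```

Adding another controller variant then means adding a table, not touching the loop that consumes it.
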
| @@ -221,6 +355,7 @@ struct d40_lcla_pool { | |||
| 221 | * @allocated_dst: Same as for src but is dst. | 355 | * @allocated_dst: Same as for src but is dst. |
| 222 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as | 356 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as |
| 223 | * event line number. | 357 | * event line number. |
| 358 | * @use_soft_lli: To mark if the linked lists of the channel are managed by SW. | ||
| 224 | */ | 359 | */ |
| 225 | struct d40_phy_res { | 360 | struct d40_phy_res { |
| 226 | spinlock_t lock; | 361 | spinlock_t lock; |
| @@ -228,6 +363,7 @@ struct d40_phy_res { | |||
| 228 | int num; | 363 | int num; |
| 229 | u32 allocated_src; | 364 | u32 allocated_src; |
| 230 | u32 allocated_dst; | 365 | u32 allocated_dst; |
| 366 | bool use_soft_lli; | ||
| 231 | }; | 367 | }; |
| 232 | 368 | ||
| 233 | struct d40_base; | 369 | struct d40_base; |
| @@ -248,6 +384,7 @@ struct d40_base; | |||
| 248 | * @client: Cliented owned descriptor list. | 384 | * @client: Cliented owned descriptor list. |
| 249 | * @pending_queue: Submitted jobs, to be issued by issue_pending() | 385 | * @pending_queue: Submitted jobs, to be issued by issue_pending() |
| 250 | * @active: Active descriptor. | 386 | * @active: Active descriptor. |
| 387 | * @done: Completed jobs | ||
| 251 | * @queue: Queued jobs. | 388 | * @queue: Queued jobs. |
| 252 | * @prepare_queue: Prepared jobs. | 389 | * @prepare_queue: Prepared jobs. |
| 253 | * @dma_cfg: The client configuration of this dma channel. | 390 | * @dma_cfg: The client configuration of this dma channel. |
| @@ -273,6 +410,7 @@ struct d40_chan { | |||
| 273 | struct list_head client; | 410 | struct list_head client; |
| 274 | struct list_head pending_queue; | 411 | struct list_head pending_queue; |
| 275 | struct list_head active; | 412 | struct list_head active; |
| 413 | struct list_head done; | ||
| 276 | struct list_head queue; | 414 | struct list_head queue; |
| 277 | struct list_head prepare_queue; | 415 | struct list_head prepare_queue; |
| 278 | struct stedma40_chan_cfg dma_cfg; | 416 | struct stedma40_chan_cfg dma_cfg; |
| @@ -289,6 +427,38 @@ struct d40_chan { | |||
| 289 | }; | 427 | }; |
| 290 | 428 | ||
| 291 | /** | 429 | /** |
| 430 | * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA | ||
| 431 | * controller | ||
| 432 | * | ||
| 433 | * @backup: the pointer to the registers address array for backup | ||
| 434 | * @backup_size: the size of the registers address array for backup | ||
| 435 | * @realtime_en: the realtime enable register | ||
| 436 | * @realtime_clear: the realtime clear register | ||
| 437 | * @high_prio_en: the high priority enable register | ||
| 438 | * @high_prio_clear: the high priority clear register | ||
| 439 | * @interrupt_en: the interrupt enable register | ||
| 440 | * @interrupt_clear: the interrupt clear register | ||
| 441 | * @il: the pointer to struct d40_interrupt_lookup | ||
| 442 | * @il_size: the size of d40_interrupt_lookup array | ||
| 443 | * @init_reg: the pointer to the struct d40_reg_val | ||
| 444 | * @init_reg_size: the size of d40_reg_val array | ||
| 445 | */ | ||
| 446 | struct d40_gen_dmac { | ||
| 447 | u32 *backup; | ||
| 448 | u32 backup_size; | ||
| 449 | u32 realtime_en; | ||
| 450 | u32 realtime_clear; | ||
| 451 | u32 high_prio_en; | ||
| 452 | u32 high_prio_clear; | ||
| 453 | u32 interrupt_en; | ||
| 454 | u32 interrupt_clear; | ||
| 455 | struct d40_interrupt_lookup *il; | ||
| 456 | u32 il_size; | ||
| 457 | struct d40_reg_val *init_reg; | ||
| 458 | u32 init_reg_size; | ||
| 459 | }; | ||
| 460 | |||
| 461 | /** | ||
| 292 | * struct d40_base - The big global struct, one for each probe'd instance. | 462 | * struct d40_base - The big global struct, one for each probe'd instance. |
| 293 | * | 463 | * |
| 294 | * @interrupt_lock: Lock used to make sure one interrupt is handle a time. | 464 | * @interrupt_lock: Lock used to make sure one interrupt is handle a time. |
| @@ -326,11 +496,13 @@ struct d40_chan { | |||
| 326 | * @desc_slab: cache for descriptors. | 496 | * @desc_slab: cache for descriptors. |
| 327 | * @reg_val_backup: Here the values of some hardware registers are stored | 497 | * @reg_val_backup: Here the values of some hardware registers are stored |
| 328 | * before the DMA is powered off. They are restored when the power is back on. | 498 | * before the DMA is powered off. They are restored when the power is back on. |
| 329 | * @reg_val_backup_v3: Backup of registers that only exists on dma40 v3 and | 499 | * @reg_val_backup_v4: Backup of registers that only exists on dma40 v3 and |
| 330 | * later. | 500 | * later |
| 331 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. | 501 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. |
| 332 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. | 502 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. |
| 333 | * @initialized: true if the dma has been initialized | 503 | * @initialized: true if the dma has been initialized |
| 504 | * @gen_dmac: the struct for generic register values to represent u8500/8540 | ||
| 505 | * DMA controller | ||
| 334 | */ | 506 | */ |
| 335 | struct d40_base { | 507 | struct d40_base { |
| 336 | spinlock_t interrupt_lock; | 508 | spinlock_t interrupt_lock; |
| @@ -344,6 +516,7 @@ struct d40_base { | |||
| 344 | int irq; | 516 | int irq; |
| 345 | int num_phy_chans; | 517 | int num_phy_chans; |
| 346 | int num_log_chans; | 518 | int num_log_chans; |
| 519 | struct device_dma_parameters dma_parms; | ||
| 347 | struct dma_device dma_both; | 520 | struct dma_device dma_both; |
| 348 | struct dma_device dma_slave; | 521 | struct dma_device dma_slave; |
| 349 | struct dma_device dma_memcpy; | 522 | struct dma_device dma_memcpy; |
| @@ -361,37 +534,11 @@ struct d40_base { | |||
| 361 | resource_size_t lcpa_size; | 534 | resource_size_t lcpa_size; |
| 362 | struct kmem_cache *desc_slab; | 535 | struct kmem_cache *desc_slab; |
| 363 | u32 reg_val_backup[BACKUP_REGS_SZ]; | 536 | u32 reg_val_backup[BACKUP_REGS_SZ]; |
| 364 | u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; | 537 | u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; |
| 365 | u32 *reg_val_backup_chan; | 538 | u32 *reg_val_backup_chan; |
| 366 | u16 gcc_pwr_off_mask; | 539 | u16 gcc_pwr_off_mask; |
| 367 | bool initialized; | 540 | bool initialized; |
| 368 | }; | 541 | struct d40_gen_dmac gen_dmac; |
| 369 | |||
| 370 | /** | ||
| 371 | * struct d40_interrupt_lookup - lookup table for interrupt handler | ||
| 372 | * | ||
| 373 | * @src: Interrupt mask register. | ||
| 374 | * @clr: Interrupt clear register. | ||
| 375 | * @is_error: true if this is an error interrupt. | ||
| 376 | * @offset: start delta in the lookup_log_chans in d40_base. If equals to | ||
| 377 | * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. | ||
| 378 | */ | ||
| 379 | struct d40_interrupt_lookup { | ||
| 380 | u32 src; | ||
| 381 | u32 clr; | ||
| 382 | bool is_error; | ||
| 383 | int offset; | ||
| 384 | }; | ||
| 385 | |||
| 386 | /** | ||
| 387 | * struct d40_reg_val - simple lookup struct | ||
| 388 | * | ||
| 389 | * @reg: The register. | ||
| 390 | * @val: The value that belongs to the register in reg. | ||
| 391 | */ | ||
| 392 | struct d40_reg_val { | ||
| 393 | unsigned int reg; | ||
| 394 | unsigned int val; | ||
| 395 | }; | 542 | }; |
| 396 | 543 | ||
| 397 | static struct device *chan2dev(struct d40_chan *d40c) | 544 | static struct device *chan2dev(struct d40_chan *d40c) |
| @@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c, | |||
| 494 | unsigned long flags; | 641 | unsigned long flags; |
| 495 | int i; | 642 | int i; |
| 496 | int ret = -EINVAL; | 643 | int ret = -EINVAL; |
| 497 | int p; | ||
| 498 | 644 | ||
| 499 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 645 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
| 500 | 646 | ||
| 501 | p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP; | ||
| 502 | |||
| 503 | /* | 647 | /* |
| 504 | * Allocate both src and dst at the same time, therefore the half | 648 | * Allocate both src and dst at the same time, therefore the half |
| 505 | * start on 1 since 0 can't be used since zero is used as end marker. | 649 | * start on 1 since 0 can't be used since zero is used as end marker. |
| 506 | */ | 650 | */ |
| 507 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | 651 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { |
| 508 | if (!d40c->base->lcla_pool.alloc_map[p + i]) { | 652 | int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; |
| 509 | d40c->base->lcla_pool.alloc_map[p + i] = d40d; | 653 | |
| 654 | if (!d40c->base->lcla_pool.alloc_map[idx]) { | ||
| 655 | d40c->base->lcla_pool.alloc_map[idx] = d40d; | ||
| 510 | d40d->lcla_alloc++; | 656 | d40d->lcla_alloc++; |
| 511 | ret = i; | 657 | ret = i; |
| 512 | break; | 658 | break; |
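
The refactor above folds the old `p + i` arithmetic into a single `idx`, which makes the slot layout explicit: each physical channel owns a contiguous window of D40_LCLA_LINK_PER_EVENT_GRP entries in alloc_map, and slot 0 of every window is reserved as the end-of-list marker. A tiny sketch of that indexing (the constant's value is taken from the ste_dma40 headers and is only illustrative here):

```c
#include <stdio.h>

/* Value taken from the ste_dma40 headers; treat it as illustrative here. */
#define D40_LCLA_LINK_PER_EVENT_GRP 128

/* Slot of link i in the LCLA alloc_map for a given physical channel,
 * mirroring the idx computation in d40_lcla_alloc_one()/d40_lcla_free_all(). */
static int lcla_idx(int phy_num, int i)
{
	return phy_num * D40_LCLA_LINK_PER_EVENT_GRP + i;
}

int main(void)
{
	/* Allocation starts at i = 1: a zero link is the end-of-list marker,
	 * so slot 0 of each per-channel window is never handed out. */
	for (int i = 1; i < 4; i++)
		printf("phy 2, link %d -> alloc_map[%d]\n", i, lcla_idx(2, i));
	return 0;
}
```
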
| @@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c, | |||
| 531 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 677 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
| 532 | 678 | ||
| 533 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | 679 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { |
| 534 | if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | 680 | int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; |
| 535 | D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { | 681 | |
| 536 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | 682 | if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { |
| 537 | D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; | 683 | d40c->base->lcla_pool.alloc_map[idx] = NULL; |
| 538 | d40d->lcla_alloc--; | 684 | d40d->lcla_alloc--; |
| 539 | if (d40d->lcla_alloc == 0) { | 685 | if (d40d->lcla_alloc == 0) { |
| 540 | ret = 0; | 686 | ret = 0; |
| @@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) | |||
| 611 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); | 757 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); |
| 612 | } | 758 | } |
| 613 | 759 | ||
| 760 | static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc) | ||
| 761 | { | ||
| 762 | list_add_tail(&desc->node, &d40c->done); | ||
| 763 | } | ||
| 764 | |||
| 614 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | 765 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) |
| 615 | { | 766 | { |
| 616 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; | 767 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; |
| @@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
| 634 | * can't link back to the one in LCPA space | 785 | * can't link back to the one in LCPA space |
| 635 | */ | 786 | */ |
| 636 | if (linkback || (lli_len - lli_current > 1)) { | 787 | if (linkback || (lli_len - lli_current > 1)) { |
| 637 | curr_lcla = d40_lcla_alloc_one(chan, desc); | 788 | /* |
| 789 | * If the channel is expected to use only soft_lli don't | ||
| 790 | * allocate a lcla. This is to avoid a HW issue that exists | ||
| 791 | * in some controller during a peripheral to memory transfer | ||
| 792 | * that uses linked lists. | ||
| 793 | */ | ||
| 794 | if (!(chan->phy_chan->use_soft_lli && | ||
| 795 | chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) | ||
| 796 | curr_lcla = d40_lcla_alloc_one(chan, desc); | ||
| 797 | |||
| 638 | first_lcla = curr_lcla; | 798 | first_lcla = curr_lcla; |
| 639 | } | 799 | } |
| 640 | 800 | ||
| @@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
| 771 | return d; | 931 | return d; |
| 772 | } | 932 | } |
| 773 | 933 | ||
| 934 | static struct d40_desc *d40_first_done(struct d40_chan *d40c) | ||
| 935 | { | ||
| 936 | if (list_empty(&d40c->done)) | ||
| 937 | return NULL; | ||
| 938 | |||
| 939 | return list_first_entry(&d40c->done, struct d40_desc, node); | ||
| 940 | } | ||
| 941 | |||
| 774 | static int d40_psize_2_burst_size(bool is_log, int psize) | 942 | static int d40_psize_2_burst_size(bool is_log, int psize) |
| 775 | { | 943 | { |
| 776 | if (is_log) { | 944 | if (is_log) { |
| @@ -874,11 +1042,11 @@ static void d40_save_restore_registers(struct d40_base *base, bool save) | |||
| 874 | save); | 1042 | save); |
| 875 | 1043 | ||
| 876 | /* Save/Restore registers only existing on dma40 v3 and later */ | 1044 | /* Save/Restore registers only existing on dma40 v3 and later */ |
| 877 | if (base->rev >= 3) | 1045 | if (base->gen_dmac.backup) |
| 878 | dma40_backup(base->virtbase, base->reg_val_backup_v3, | 1046 | dma40_backup(base->virtbase, base->reg_val_backup_v4, |
| 879 | d40_backup_regs_v3, | 1047 | base->gen_dmac.backup, |
| 880 | ARRAY_SIZE(d40_backup_regs_v3), | 1048 | base->gen_dmac.backup_size, |
| 881 | save); | 1049 | save); |
| 882 | } | 1050 | } |
| 883 | #else | 1051 | #else |
| 884 | static void d40_save_restore_registers(struct d40_base *base, bool save) | 1052 | static void d40_save_restore_registers(struct d40_base *base, bool save) |
| @@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c) | |||
| 961 | struct d40_desc *d40d; | 1129 | struct d40_desc *d40d; |
| 962 | struct d40_desc *_d; | 1130 | struct d40_desc *_d; |
| 963 | 1131 | ||
| 1132 | /* Release completed descriptors */ | ||
| 1133 | while ((d40d = d40_first_done(d40c))) { | ||
| 1134 | d40_desc_remove(d40d); | ||
| 1135 | d40_desc_free(d40c, d40d); | ||
| 1136 | } | ||
| 1137 | |||
| 964 | /* Release active descriptors */ | 1138 | /* Release active descriptors */ |
| 965 | while ((d40d = d40_first_active_get(d40c))) { | 1139 | while ((d40d = d40_first_active_get(d40c))) { |
| 966 | d40_desc_remove(d40d); | 1140 | d40_desc_remove(d40d); |
| @@ -1398,6 +1572,9 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
| 1398 | pm_runtime_put_autosuspend(d40c->base->dev); | 1572 | pm_runtime_put_autosuspend(d40c->base->dev); |
| 1399 | } | 1573 | } |
| 1400 | 1574 | ||
| 1575 | d40_desc_remove(d40d); | ||
| 1576 | d40_desc_done(d40c, d40d); | ||
| 1577 | |||
| 1401 | d40c->pending_tx++; | 1578 | d40c->pending_tx++; |
| 1402 | tasklet_schedule(&d40c->tasklet); | 1579 | tasklet_schedule(&d40c->tasklet); |
| 1403 | 1580 | ||
| @@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data) | |||
| 1413 | 1590 | ||
| 1414 | spin_lock_irqsave(&d40c->lock, flags); | 1591 | spin_lock_irqsave(&d40c->lock, flags); |
| 1415 | 1592 | ||
| 1416 | /* Get first active entry from list */ | 1593 | /* Get first entry from the done list */ |
| 1417 | d40d = d40_first_active_get(d40c); | 1594 | d40d = d40_first_done(d40c); |
| 1418 | if (d40d == NULL) | 1595 | if (d40d == NULL) { |
| 1419 | goto err; | 1596 | /* Check if we have reached here for cyclic job */ |
| 1597 | d40d = d40_first_active_get(d40c); | ||
| 1598 | if (d40d == NULL || !d40d->cyclic) | ||
| 1599 | goto err; | ||
| 1600 | } | ||
| 1420 | 1601 | ||
| 1421 | if (!d40d->cyclic) | 1602 | if (!d40d->cyclic) |
| 1422 | dma_cookie_complete(&d40d->txd); | 1603 | dma_cookie_complete(&d40d->txd); |
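
Taken together with the d40_desc_done()/d40_first_done() hunks earlier, this gives the driver a three-stage hand-off: dma_tc_handle() (interrupt context) moves a finished descriptor from the active list to the new done list, and dma_tasklet() later pops it from done to complete the cookie and free or park it; only cyclic jobs are still picked up from active. A compact user-space sketch of that hand-off with a minimal singly linked list (all names and the cookie value are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct desc {
	int cookie;
	struct desc *next;
};

struct list { struct desc *head, *tail; };

static void list_add_tail(struct list *l, struct desc *d)
{
	d->next = NULL;
	if (l->tail)
		l->tail->next = d;
	else
		l->head = d;
	l->tail = d;
}

static struct desc *list_pop(struct list *l)
{
	struct desc *d = l->head;
	if (d) {
		l->head = d->next;
		if (!l->head)
			l->tail = NULL;
	}
	return d;
}

int main(void)
{
	struct list active = { 0 }, done = { 0 };
	struct desc *d = calloc(1, sizeof(*d));

	d->cookie = 42;
	list_add_tail(&active, d);                 /* job issued and running      */
	list_add_tail(&done, list_pop(&active));   /* dma_tc_handle(): tc irq     */

	struct desc *finished = list_pop(&done);   /* dma_tasklet(): complete it  */
	printf("completed cookie %d\n", finished->cookie);
	free(finished);
	return 0;
}
```
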
| @@ -1438,13 +1619,11 @@ static void dma_tasklet(unsigned long data) | |||
| 1438 | if (async_tx_test_ack(&d40d->txd)) { | 1619 | if (async_tx_test_ack(&d40d->txd)) { |
| 1439 | d40_desc_remove(d40d); | 1620 | d40_desc_remove(d40d); |
| 1440 | d40_desc_free(d40c, d40d); | 1621 | d40_desc_free(d40c, d40d); |
| 1441 | } else { | 1622 | } else if (!d40d->is_in_client_list) { |
| 1442 | if (!d40d->is_in_client_list) { | 1623 | d40_desc_remove(d40d); |
| 1443 | d40_desc_remove(d40d); | 1624 | d40_lcla_free_all(d40c, d40d); |
| 1444 | d40_lcla_free_all(d40c, d40d); | 1625 | list_add_tail(&d40d->node, &d40c->client); |
| 1445 | list_add_tail(&d40d->node, &d40c->client); | 1626 | d40d->is_in_client_list = true; |
| 1446 | d40d->is_in_client_list = true; | ||
| 1447 | } | ||
| 1448 | } | 1627 | } |
| 1449 | } | 1628 | } |
| 1450 | 1629 | ||
| @@ -1469,53 +1648,51 @@ err: | |||
| 1469 | 1648 | ||
| 1470 | static irqreturn_t d40_handle_interrupt(int irq, void *data) | 1649 | static irqreturn_t d40_handle_interrupt(int irq, void *data) |
| 1471 | { | 1650 | { |
| 1472 | static const struct d40_interrupt_lookup il[] = { | ||
| 1473 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, | ||
| 1474 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | ||
| 1475 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | ||
| 1476 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | ||
| 1477 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, | ||
| 1478 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, | ||
| 1479 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, | ||
| 1480 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, | ||
| 1481 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, | ||
| 1482 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, | ||
| 1483 | }; | ||
| 1484 | |||
| 1485 | int i; | 1651 | int i; |
| 1486 | u32 regs[ARRAY_SIZE(il)]; | ||
| 1487 | u32 idx; | 1652 | u32 idx; |
| 1488 | u32 row; | 1653 | u32 row; |
| 1489 | long chan = -1; | 1654 | long chan = -1; |
| 1490 | struct d40_chan *d40c; | 1655 | struct d40_chan *d40c; |
| 1491 | unsigned long flags; | 1656 | unsigned long flags; |
| 1492 | struct d40_base *base = data; | 1657 | struct d40_base *base = data; |
| 1658 | u32 regs[base->gen_dmac.il_size]; | ||
| 1659 | struct d40_interrupt_lookup *il = base->gen_dmac.il; | ||
| 1660 | u32 il_size = base->gen_dmac.il_size; | ||
| 1493 | 1661 | ||
| 1494 | spin_lock_irqsave(&base->interrupt_lock, flags); | 1662 | spin_lock_irqsave(&base->interrupt_lock, flags); |
| 1495 | 1663 | ||
| 1496 | /* Read interrupt status of both logical and physical channels */ | 1664 | /* Read interrupt status of both logical and physical channels */ |
| 1497 | for (i = 0; i < ARRAY_SIZE(il); i++) | 1665 | for (i = 0; i < il_size; i++) |
| 1498 | regs[i] = readl(base->virtbase + il[i].src); | 1666 | regs[i] = readl(base->virtbase + il[i].src); |
| 1499 | 1667 | ||
| 1500 | for (;;) { | 1668 | for (;;) { |
| 1501 | 1669 | ||
| 1502 | chan = find_next_bit((unsigned long *)regs, | 1670 | chan = find_next_bit((unsigned long *)regs, |
| 1503 | BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); | 1671 | BITS_PER_LONG * il_size, chan + 1); |
| 1504 | 1672 | ||
| 1505 | /* No more set bits found? */ | 1673 | /* No more set bits found? */ |
| 1506 | if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) | 1674 | if (chan == BITS_PER_LONG * il_size) |
| 1507 | break; | 1675 | break; |
| 1508 | 1676 | ||
| 1509 | row = chan / BITS_PER_LONG; | 1677 | row = chan / BITS_PER_LONG; |
| 1510 | idx = chan & (BITS_PER_LONG - 1); | 1678 | idx = chan & (BITS_PER_LONG - 1); |
| 1511 | 1679 | ||
| 1512 | /* ACK interrupt */ | ||
| 1513 | writel(1 << idx, base->virtbase + il[row].clr); | ||
| 1514 | |||
| 1515 | if (il[row].offset == D40_PHY_CHAN) | 1680 | if (il[row].offset == D40_PHY_CHAN) |
| 1516 | d40c = base->lookup_phy_chans[idx]; | 1681 | d40c = base->lookup_phy_chans[idx]; |
| 1517 | else | 1682 | else |
| 1518 | d40c = base->lookup_log_chans[il[row].offset + idx]; | 1683 | d40c = base->lookup_log_chans[il[row].offset + idx]; |
| 1684 | |||
| 1685 | if (!d40c) { | ||
| 1686 | /* | ||
| 1687 | * No error because this can happen if something else | ||
| 1688 | * in the system is using the channel. | ||
| 1689 | */ | ||
| 1690 | continue; | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | /* ACK interrupt */ | ||
| 1694 | writel(1 << idx, base->virtbase + il[row].clr); | ||
| 1695 | |||
| 1519 | spin_lock(&d40c->lock); | 1696 | spin_lock(&d40c->lock); |
| 1520 | 1697 | ||
| 1521 | if (!il[row].is_error) | 1698 | if (!il[row].is_error) |
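
With the lookup table now supplied through gen_dmac, the handler only snapshots whatever status registers it was given, walks the set bits, and maps each bit back to a channel via il[row].offset before acking; the ack is also skipped entirely when no channel is registered for the bit. A standalone sketch of that scan, using a plain nested bit loop in place of find_next_bit and purely illustrative register values:

```c
#include <stdint.h>
#include <stdio.h>

struct irq_lookup {
	int offset;     /* base index into the logical-channel array */
	int is_error;
};

#define NREGS 4

int main(void)
{
	/* Snapshot of the status registers, as read at the top of the handler;
	 * the values and table below are made up for this sketch. */
	uint32_t regs[NREGS] = { 0x00000005, 0x0, 0x80000000, 0x0 };
	static const struct irq_lookup il[NREGS] = {
		{ .offset = 0,  .is_error = 0 },
		{ .offset = 32, .is_error = 0 },
		{ .offset = 0,  .is_error = 1 },
		{ .offset = 32, .is_error = 1 },
	};

	for (int row = 0; row < NREGS; row++) {
		for (int bit = 0; bit < 32; bit++) {
			if (!(regs[row] & (1u << bit)))
				continue;
			/* The driver looks up the channel here, skips the bit if
			 * no channel is registered, then acks via il[row].clr. */
			printf("channel %d: %s interrupt\n", il[row].offset + bit,
			       il[row].is_error ? "error" : "terminal-count");
		}
	}
	return 0;
}
```
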
| @@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
| 1710 | int i; | 1887 | int i; |
| 1711 | int j; | 1888 | int j; |
| 1712 | int log_num; | 1889 | int log_num; |
| 1890 | int num_phy_chans; | ||
| 1713 | bool is_src; | 1891 | bool is_src; |
| 1714 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; | 1892 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
| 1715 | 1893 | ||
| 1716 | phys = d40c->base->phy_res; | 1894 | phys = d40c->base->phy_res; |
| 1895 | num_phy_chans = d40c->base->num_phy_chans; | ||
| 1717 | 1896 | ||
| 1718 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1897 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
| 1719 | dev_type = d40c->dma_cfg.src_dev_type; | 1898 | dev_type = d40c->dma_cfg.src_dev_type; |
| @@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
| 1734 | if (!is_log) { | 1913 | if (!is_log) { |
| 1735 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1914 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
| 1736 | /* Find physical half channel */ | 1915 | /* Find physical half channel */ |
| 1737 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | 1916 | if (d40c->dma_cfg.use_fixed_channel) { |
| 1738 | 1917 | i = d40c->dma_cfg.phy_channel; | |
| 1739 | if (d40_alloc_mask_set(&phys[i], is_src, | 1918 | if (d40_alloc_mask_set(&phys[i], is_src, |
| 1740 | 0, is_log, | 1919 | 0, is_log, |
| 1741 | first_phy_user)) | 1920 | first_phy_user)) |
| 1742 | goto found_phy; | 1921 | goto found_phy; |
| 1922 | } else { | ||
| 1923 | for (i = 0; i < num_phy_chans; i++) { | ||
| 1924 | if (d40_alloc_mask_set(&phys[i], is_src, | ||
| 1925 | 0, is_log, | ||
| 1926 | first_phy_user)) | ||
| 1927 | goto found_phy; | ||
| 1928 | } | ||
| 1743 | } | 1929 | } |
| 1744 | } else | 1930 | } else |
| 1745 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1931 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
| @@ -1954,7 +2140,6 @@ _exit: | |||
| 1954 | 2140 | ||
| 1955 | } | 2141 | } |
| 1956 | 2142 | ||
| 1957 | |||
| 1958 | static u32 stedma40_residue(struct dma_chan *chan) | 2143 | static u32 stedma40_residue(struct dma_chan *chan) |
| 1959 | { | 2144 | { |
| 1960 | struct d40_chan *d40c = | 2145 | struct d40_chan *d40c = |
| @@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | |||
| 2030 | return ret < 0 ? ret : 0; | 2215 | return ret < 0 ? ret : 0; |
| 2031 | } | 2216 | } |
| 2032 | 2217 | ||
| 2033 | |||
| 2034 | static struct d40_desc * | 2218 | static struct d40_desc * |
| 2035 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | 2219 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, |
| 2036 | unsigned int sg_len, unsigned long dma_flags) | 2220 | unsigned int sg_len, unsigned long dma_flags) |
| @@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | |||
| 2056 | goto err; | 2240 | goto err; |
| 2057 | } | 2241 | } |
| 2058 | 2242 | ||
| 2059 | |||
| 2060 | desc->lli_current = 0; | 2243 | desc->lli_current = 0; |
| 2061 | desc->txd.flags = dma_flags; | 2244 | desc->txd.flags = dma_flags; |
| 2062 | desc->txd.tx_submit = d40_tx_submit; | 2245 | desc->txd.tx_submit = d40_tx_submit; |
| @@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
| 2105 | return NULL; | 2288 | return NULL; |
| 2106 | } | 2289 | } |
| 2107 | 2290 | ||
| 2108 | |||
| 2109 | spin_lock_irqsave(&chan->lock, flags); | 2291 | spin_lock_irqsave(&chan->lock, flags); |
| 2110 | 2292 | ||
| 2111 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); | 2293 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
| @@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) | |||
| 2179 | { | 2361 | { |
| 2180 | bool realtime = d40c->dma_cfg.realtime; | 2362 | bool realtime = d40c->dma_cfg.realtime; |
| 2181 | bool highprio = d40c->dma_cfg.high_priority; | 2363 | bool highprio = d40c->dma_cfg.high_priority; |
| 2182 | u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | 2364 | u32 rtreg; |
| 2183 | u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | ||
| 2184 | u32 event = D40_TYPE_TO_EVENT(dev_type); | 2365 | u32 event = D40_TYPE_TO_EVENT(dev_type); |
| 2185 | u32 group = D40_TYPE_TO_GROUP(dev_type); | 2366 | u32 group = D40_TYPE_TO_GROUP(dev_type); |
| 2186 | u32 bit = 1 << event; | 2367 | u32 bit = 1 << event; |
| 2368 | u32 prioreg; | ||
| 2369 | struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; | ||
| 2370 | |||
| 2371 | rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; | ||
| 2372 | /* | ||
| 2373 | * Due to a hardware bug, in some cases a logical channel triggered by | ||
| 2374 | * a high priority destination event line can generate extra packet | ||
| 2375 | * transactions. | ||
| 2376 | * | ||
| 2377 | * The workaround is to not set the high priority level for the | ||
| 2378 | * destination event lines that trigger logical channels. | ||
| 2379 | */ | ||
| 2380 | if (!src && chan_is_logical(d40c)) | ||
| 2381 | highprio = false; | ||
| 2382 | |||
| 2383 | prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; | ||
| 2187 | 2384 | ||
| 2188 | /* Destination event lines are stored in the upper halfword */ | 2385 | /* Destination event lines are stored in the upper halfword */ |
| 2189 | if (!src) | 2386 | if (!src) |
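
The register selection above changes, but the event addressing does not: the device type is split into an event group (which picks the SEG/CEG register) and an event number (which picks the bit), and destination event lines land in the upper halfword. A small sketch of that bit math, with the driver's D40_TYPE_TO_EVENT/D40_TYPE_TO_GROUP macros reimplemented here purely for illustration:

```c
#include <stdio.h>

/* Reimplemented for illustration; the driver's versions live in ste_dma40_ll.h. */
#define TYPE_TO_EVENT(type)  ((type) % 16)
#define TYPE_TO_GROUP(type)  ((type) / 16)

int main(void)
{
	int dev_type = 0x23;                  /* illustrative device/event type */
	unsigned int event = TYPE_TO_EVENT(dev_type);
	unsigned int group = TYPE_TO_GROUP(dev_type);
	unsigned int bit = 1u << event;
	int src = 0;                          /* configuring the destination side */

	if (!src)
		bit <<= 16;                   /* dst events: upper halfword */

	printf("group %u selects the register, mask 0x%x selects the event\n",
	       group, bit);
	return 0;
}
```
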
| @@ -2248,11 +2445,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
| 2248 | 2445 | ||
| 2249 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 2446 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
| 2250 | d40c->lcpa = d40c->base->lcpa_base + | 2447 | d40c->lcpa = d40c->base->lcpa_base + |
| 2251 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; | 2448 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; |
| 2252 | else | 2449 | else |
| 2253 | d40c->lcpa = d40c->base->lcpa_base + | 2450 | d40c->lcpa = d40c->base->lcpa_base + |
| 2254 | d40c->dma_cfg.dst_dev_type * | 2451 | d40c->dma_cfg.dst_dev_type * |
| 2255 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 2452 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
| 2256 | } | 2453 | } |
| 2257 | 2454 | ||
| 2258 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | 2455 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", |
| @@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
| 2287 | return; | 2484 | return; |
| 2288 | } | 2485 | } |
| 2289 | 2486 | ||
| 2290 | |||
| 2291 | spin_lock_irqsave(&d40c->lock, flags); | 2487 | spin_lock_irqsave(&d40c->lock, flags); |
| 2292 | 2488 | ||
| 2293 | err = d40_free_dma(d40c); | 2489 | err = d40_free_dma(d40c); |
| @@ -2330,12 +2526,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan, | |||
| 2330 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); | 2526 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); |
| 2331 | } | 2527 | } |
| 2332 | 2528 | ||
| 2333 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2529 | static struct dma_async_tx_descriptor * |
| 2334 | struct scatterlist *sgl, | 2530 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
| 2335 | unsigned int sg_len, | 2531 | unsigned int sg_len, enum dma_transfer_direction direction, |
| 2336 | enum dma_transfer_direction direction, | 2532 | unsigned long dma_flags, void *context) |
| 2337 | unsigned long dma_flags, | ||
| 2338 | void *context) | ||
| 2339 | { | 2533 | { |
| 2340 | if (!is_slave_direction(direction)) | 2534 | if (!is_slave_direction(direction)) |
| 2341 | return NULL; | 2535 | return NULL; |
| @@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
| 2577 | return -EINVAL; | 2771 | return -EINVAL; |
| 2578 | } | 2772 | } |
| 2579 | 2773 | ||
| 2774 | if (src_maxburst > 16) { | ||
| 2775 | src_maxburst = 16; | ||
| 2776 | dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; | ||
| 2777 | } else if (dst_maxburst > 16) { | ||
| 2778 | dst_maxburst = 16; | ||
| 2779 | src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; | ||
| 2780 | } | ||
| 2781 | |||
| 2580 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, | 2782 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
| 2581 | src_addr_width, | 2783 | src_addr_width, |
| 2582 | src_maxburst); | 2784 | src_maxburst); |
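
The clamp above caps either side at 16 beats and rescales the other side by the ratio of the address widths, so both halves of the channel still move the same number of bytes per burst. A tiny worked example of that arithmetic (widths in bytes, all values illustrative):

```c
#include <stdio.h>

int main(void)
{
	unsigned int src_maxburst = 32, dst_maxburst = 16;
	unsigned int src_addr_width = 2, dst_addr_width = 4;   /* bytes per beat */

	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	/* 16 * 2 == 8 * 4: both sides still move 32 bytes per burst. */
	printf("src %u x %u B = %u B, dst %u x %u B = %u B\n",
	       src_maxburst, src_addr_width, src_maxburst * src_addr_width,
	       dst_maxburst, dst_addr_width, dst_maxburst * dst_addr_width);
	return 0;
}
```
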
| @@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
| 2659 | 2861 | ||
| 2660 | d40c->log_num = D40_PHY_CHAN; | 2862 | d40c->log_num = D40_PHY_CHAN; |
| 2661 | 2863 | ||
| 2864 | INIT_LIST_HEAD(&d40c->done); | ||
| 2662 | INIT_LIST_HEAD(&d40c->active); | 2865 | INIT_LIST_HEAD(&d40c->active); |
| 2663 | INIT_LIST_HEAD(&d40c->queue); | 2866 | INIT_LIST_HEAD(&d40c->queue); |
| 2664 | INIT_LIST_HEAD(&d40c->pending_queue); | 2867 | INIT_LIST_HEAD(&d40c->pending_queue); |
| @@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev) | |||
| 2773 | struct platform_device *pdev = to_platform_device(dev); | 2976 | struct platform_device *pdev = to_platform_device(dev); |
| 2774 | struct d40_base *base = platform_get_drvdata(pdev); | 2977 | struct d40_base *base = platform_get_drvdata(pdev); |
| 2775 | int ret = 0; | 2978 | int ret = 0; |
| 2776 | if (!pm_runtime_suspended(dev)) | ||
| 2777 | return -EBUSY; | ||
| 2778 | 2979 | ||
| 2779 | if (base->lcpa_regulator) | 2980 | if (base->lcpa_regulator) |
| 2780 | ret = regulator_disable(base->lcpa_regulator); | 2981 | ret = regulator_disable(base->lcpa_regulator); |
| @@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
| 2882 | num_phy_chans_avail--; | 3083 | num_phy_chans_avail--; |
| 2883 | } | 3084 | } |
| 2884 | 3085 | ||
| 3086 | /* Mark soft_lli channels */ | ||
| 3087 | for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { | ||
| 3088 | int chan = base->plat_data->soft_lli_chans[i]; | ||
| 3089 | |||
| 3090 | base->phy_res[chan].use_soft_lli = true; | ||
| 3091 | } | ||
| 3092 | |||
| 2885 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 3093 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
| 2886 | num_phy_chans_avail, base->num_phy_chans); | 3094 | num_phy_chans_avail, base->num_phy_chans); |
| 2887 | 3095 | ||
| @@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2975 | * ? has revision 1 | 3183 | * ? has revision 1 |
| 2976 | * DB8500v1 has revision 2 | 3184 | * DB8500v1 has revision 2 |
| 2977 | * DB8500v2 has revision 3 | 3185 | * DB8500v2 has revision 3 |
| 3186 | * AP9540v1 has revision 4 | ||
| 3187 | * DB8540v1 has revision 4 | ||
| 2978 | */ | 3188 | */ |
| 2979 | rev = AMBA_REV_BITS(pid); | 3189 | rev = AMBA_REV_BITS(pid); |
| 2980 | 3190 | ||
| 3191 | plat_data = pdev->dev.platform_data; | ||
| 3192 | |||
| 2981 | /* The number of physical channels on this HW */ | 3193 | /* The number of physical channels on this HW */ |
| 2982 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 3194 | if (plat_data->num_of_phy_chans) |
| 3195 | num_phy_chans = plat_data->num_of_phy_chans; | ||
| 3196 | else | ||
| 3197 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | ||
| 2983 | 3198 | ||
| 2984 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 3199 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", |
| 2985 | rev, res->start); | 3200 | rev, res->start, num_phy_chans); |
| 2986 | 3201 | ||
| 2987 | if (rev < 2) { | 3202 | if (rev < 2) { |
| 2988 | d40_err(&pdev->dev, "hardware revision: %d is not supported", | 3203 | d40_err(&pdev->dev, "hardware revision: %d is not supported", |
| @@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2990 | goto failure; | 3205 | goto failure; |
| 2991 | } | 3206 | } |
| 2992 | 3207 | ||
| 2993 | plat_data = pdev->dev.platform_data; | ||
| 2994 | |||
| 2995 | /* Count the number of logical channels in use */ | 3208 | /* Count the number of logical channels in use */ |
| 2996 | for (i = 0; i < plat_data->dev_len; i++) | 3209 | for (i = 0; i < plat_data->dev_len; i++) |
| 2997 | if (plat_data->dev_rx[i] != 0) | 3210 | if (plat_data->dev_rx[i] != 0) |
| @@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 3022 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | 3235 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); |
| 3023 | base->log_chans = &base->phy_chans[num_phy_chans]; | 3236 | base->log_chans = &base->phy_chans[num_phy_chans]; |
| 3024 | 3237 | ||
| 3238 | if (base->plat_data->num_of_phy_chans == 14) { | ||
| 3239 | base->gen_dmac.backup = d40_backup_regs_v4b; | ||
| 3240 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; | ||
| 3241 | base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; | ||
| 3242 | base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; | ||
| 3243 | base->gen_dmac.realtime_en = D40_DREG_CRSEG1; | ||
| 3244 | base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; | ||
| 3245 | base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; | ||
| 3246 | base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; | ||
| 3247 | base->gen_dmac.il = il_v4b; | ||
| 3248 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); | ||
| 3249 | base->gen_dmac.init_reg = dma_init_reg_v4b; | ||
| 3250 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); | ||
| 3251 | } else { | ||
| 3252 | if (base->rev >= 3) { | ||
| 3253 | base->gen_dmac.backup = d40_backup_regs_v4a; | ||
| 3254 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; | ||
| 3255 | } | ||
| 3256 | base->gen_dmac.interrupt_en = D40_DREG_PCMIS; | ||
| 3257 | base->gen_dmac.interrupt_clear = D40_DREG_PCICR; | ||
| 3258 | base->gen_dmac.realtime_en = D40_DREG_RSEG1; | ||
| 3259 | base->gen_dmac.realtime_clear = D40_DREG_RCEG1; | ||
| 3260 | base->gen_dmac.high_prio_en = D40_DREG_PSEG1; | ||
| 3261 | base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; | ||
| 3262 | base->gen_dmac.il = il_v4a; | ||
| 3263 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); | ||
| 3264 | base->gen_dmac.init_reg = dma_init_reg_v4a; | ||
| 3265 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); | ||
| 3266 | } | ||
| 3267 | |||
| 3025 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | 3268 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), |
| 3026 | GFP_KERNEL); | 3269 | GFP_KERNEL); |
| 3027 | if (!base->phy_res) | 3270 | if (!base->phy_res) |
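
The probe hunk above keys the register layout off the platform data rather than the AMBA revision alone, because AP9540 and DB8540 both report revision 4 and differ only in their physical channel count, while the optional v4a backup list is still gated on revision 3 or later. A minimal decision sketch (the constants are the ones used in the hunk; the helper itself is illustrative):

```c
#include <stdio.h>

enum d40_variant { D40_V4A, D40_V4B };

/* AP9540 and DB8540 both report AMBA revision 4; the 8540 ("v4b") layout is
 * recognised by its 14 physical channels, as in the probe code above. */
static enum d40_variant pick_variant(int rev, int num_phy_chans)
{
	(void)rev;  /* rev >= 3 only decides whether the v4a backup list is used */
	return (num_phy_chans == 14) ? D40_V4B : D40_V4A;
}

int main(void)
{
	printf("rev 4,  8 chans -> %s\n", pick_variant(4, 8)  == D40_V4B ? "v4b" : "v4a");
	printf("rev 4, 14 chans -> %s\n", pick_variant(4, 14) == D40_V4B ? "v4b" : "v4a");
	return 0;
}
```
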
| @@ -3093,31 +3336,15 @@ failure: | |||
| 3093 | static void __init d40_hw_init(struct d40_base *base) | 3336 | static void __init d40_hw_init(struct d40_base *base) |
| 3094 | { | 3337 | { |
| 3095 | 3338 | ||
| 3096 | static struct d40_reg_val dma_init_reg[] = { | ||
| 3097 | /* Clock every part of the DMA block from start */ | ||
| 3098 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | ||
| 3099 | |||
| 3100 | /* Interrupts on all logical channels */ | ||
| 3101 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | ||
| 3102 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | ||
| 3103 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | ||
| 3104 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | ||
| 3105 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | ||
| 3106 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | ||
| 3107 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | ||
| 3108 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | ||
| 3109 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | ||
| 3110 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | ||
| 3111 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | ||
| 3112 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | ||
| 3113 | }; | ||
| 3114 | int i; | 3339 | int i; |
| 3115 | u32 prmseo[2] = {0, 0}; | 3340 | u32 prmseo[2] = {0, 0}; |
| 3116 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | 3341 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; |
| 3117 | u32 pcmis = 0; | 3342 | u32 pcmis = 0; |
| 3118 | u32 pcicr = 0; | 3343 | u32 pcicr = 0; |
| 3344 | struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; | ||
| 3345 | u32 reg_size = base->gen_dmac.init_reg_size; | ||
| 3119 | 3346 | ||
| 3120 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | 3347 | for (i = 0; i < reg_size; i++) |
| 3121 | writel(dma_init_reg[i].val, | 3348 | writel(dma_init_reg[i].val, |
| 3122 | base->virtbase + dma_init_reg[i].reg); | 3349 | base->virtbase + dma_init_reg[i].reg); |
| 3123 | 3350 | ||
| @@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base) | |||
| 3150 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | 3377 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); |
| 3151 | 3378 | ||
| 3152 | /* Write which interrupt to enable */ | 3379 | /* Write which interrupt to enable */ |
| 3153 | writel(pcmis, base->virtbase + D40_DREG_PCMIS); | 3380 | writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); |
| 3154 | 3381 | ||
| 3155 | /* Write which interrupt to clear */ | 3382 | /* Write which interrupt to clear */ |
| 3156 | writel(pcicr, base->virtbase + D40_DREG_PCICR); | 3383 | writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); |
| 3157 | 3384 | ||
| 3385 | /* These are __initdata and cannot be accessed after init */ | ||
| 3386 | base->gen_dmac.init_reg = NULL; | ||
| 3387 | base->gen_dmac.init_reg_size = 0; | ||
| 3158 | } | 3388 | } |
| 3159 | 3389 | ||
| 3160 | static int __init d40_lcla_allocate(struct d40_base *base) | 3390 | static int __init d40_lcla_allocate(struct d40_base *base) |
| @@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev) | |||
| 3362 | if (err) | 3592 | if (err) |
| 3363 | goto failure; | 3593 | goto failure; |
| 3364 | 3594 | ||
| 3595 | base->dev->dma_parms = &base->dma_parms; | ||
| 3596 | err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); | ||
| 3597 | if (err) { | ||
| 3598 | d40_err(&pdev->dev, "Failed to set dma max seg size\n"); | ||
| 3599 | goto failure; | ||
| 3600 | } | ||
| 3601 | |||
| 3365 | d40_hw_init(base); | 3602 | d40_hw_init(base); |
| 3366 | 3603 | ||
| 3367 | dev_info(base->dev, "initialized\n"); | 3604 | dev_info(base->dev, "initialized\n"); |
| @@ -3397,7 +3634,7 @@ failure: | |||
| 3397 | release_mem_region(base->phy_start, | 3634 | release_mem_region(base->phy_start, |
| 3398 | base->phy_size); | 3635 | base->phy_size); |
| 3399 | if (base->clk) { | 3636 | if (base->clk) { |
| 3400 | clk_disable(base->clk); | 3637 | clk_disable_unprepare(base->clk); |
| 3401 | clk_put(base->clk); | 3638 | clk_put(base->clk); |
| 3402 | } | 3639 | } |
| 3403 | 3640 | ||
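
Two of the probe changes in this file are about API pairing rather than DMA logic: the device now advertises its maximum segment size through dma_parms, and the failure path matches the clk_prepare_enable() done earlier in probe with clk_disable_unprepare() instead of a bare clk_disable(). A condensed kernel-style sketch of that shape; this is not the driver's actual probe, and struct my_dev with its dev/clk/dma_parms members plus MY_MAX_SEG_SIZE are assumed placeholders:

```c
/* Sketch only: assumes a struct my_dev with struct device *dev, struct clk *clk
 * and struct device_dma_parameters dma_parms members, and MY_MAX_SEG_SIZE
 * defined elsewhere. */
static int my_probe_fragment(struct my_dev *base)
{
	int err;

	base->clk = clk_get(base->dev, NULL);
	if (IS_ERR(base->clk))
		return PTR_ERR(base->clk);

	err = clk_prepare_enable(base->clk);   /* prepare + enable in one call */
	if (err)
		goto put_clk;

	/* Tell the DMA core how large a single segment may be. */
	base->dev->dma_parms = &base->dma_parms;
	err = dma_set_max_seg_size(base->dev, MY_MAX_SEG_SIZE);
	if (err)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(base->clk);      /* undo clk_prepare_enable() */
put_clk:
	clk_put(base->clk);
	return err;
}
```
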
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 851ad56e8409..7180e0d41722 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
| @@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
| 102 | src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; | 102 | src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; |
| 103 | dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; | 103 | dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; |
| 104 | 104 | ||
| 105 | /* Set the priority bit to high for the physical channel */ | ||
| 106 | if (cfg->high_priority) { | ||
| 107 | src |= 1 << D40_SREG_CFG_PRI_POS; | ||
| 108 | dst |= 1 << D40_SREG_CFG_PRI_POS; | ||
| 109 | } | ||
| 110 | |||
| 105 | } else { | 111 | } else { |
| 106 | /* Logical channel */ | 112 | /* Logical channel */ |
| 107 | dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; | 113 | dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; |
| 108 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; | 114 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; |
| 109 | } | 115 | } |
| 110 | 116 | ||
| 111 | if (cfg->high_priority) { | ||
| 112 | src |= 1 << D40_SREG_CFG_PRI_POS; | ||
| 113 | dst |= 1 << D40_SREG_CFG_PRI_POS; | ||
| 114 | } | ||
| 115 | |||
| 116 | if (cfg->src_info.big_endian) | 117 | if (cfg->src_info.big_endian) |
| 117 | src |= 1 << D40_SREG_CFG_LBE_POS; | 118 | src |= 1 << D40_SREG_CFG_LBE_POS; |
| 118 | if (cfg->dst_info.big_endian) | 119 | if (cfg->dst_info.big_endian) |
| @@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, | |||
| 250 | 251 | ||
| 251 | return lli; | 252 | return lli; |
| 252 | 253 | ||
| 253 | err: | 254 | err: |
| 254 | return NULL; | 255 | return NULL; |
| 255 | } | 256 | } |
| 256 | 257 | ||
| @@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | |||
| 331 | { | 332 | { |
| 332 | d40_log_lli_link(lli_dst, lli_src, next, flags); | 333 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
| 333 | 334 | ||
| 334 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); | 335 | writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0); |
| 335 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); | 336 | writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1); |
| 336 | writel(lli_dst->lcsp02, &lcpa[0].lcsp2); | 337 | writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2); |
| 337 | writel(lli_dst->lcsp13, &lcpa[0].lcsp3); | 338 | writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3); |
| 338 | } | 339 | } |
| 339 | 340 | ||
| 340 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | 341 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, |
| @@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | |||
| 344 | { | 345 | { |
| 345 | d40_log_lli_link(lli_dst, lli_src, next, flags); | 346 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
| 346 | 347 | ||
| 347 | writel(lli_src->lcsp02, &lcla[0].lcsp02); | 348 | writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02); |
| 348 | writel(lli_src->lcsp13, &lcla[0].lcsp13); | 349 | writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13); |
| 349 | writel(lli_dst->lcsp02, &lcla[1].lcsp02); | 350 | writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02); |
| 350 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); | 351 | writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13); |
| 351 | } | 352 | } |
| 352 | 353 | ||
| 353 | static void d40_log_fill_lli(struct d40_log_lli *lli, | 354 | static void d40_log_fill_lli(struct d40_log_lli *lli, |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 6d47373f3f58..fdde8ef77542 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
| @@ -125,7 +125,7 @@ | |||
| 125 | #define D40_DREG_GCC 0x000 | 125 | #define D40_DREG_GCC 0x000 |
| 126 | #define D40_DREG_GCC_ENA 0x1 | 126 | #define D40_DREG_GCC_ENA 0x1 |
| 127 | /* This assumes that there are only 4 event groups */ | 127 | /* This assumes that there are only 4 event groups */ |
| 128 | #define D40_DREG_GCC_ENABLE_ALL 0xff01 | 128 | #define D40_DREG_GCC_ENABLE_ALL 0x3ff01 |
| 129 | #define D40_DREG_GCC_EVTGRP_POS 8 | 129 | #define D40_DREG_GCC_EVTGRP_POS 8 |
| 130 | #define D40_DREG_GCC_SRC 0 | 130 | #define D40_DREG_GCC_SRC 0 |
| 131 | #define D40_DREG_GCC_DST 1 | 131 | #define D40_DREG_GCC_DST 1 |
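
The widened D40_DREG_GCC_ENABLE_ALL value follows from the GCC bit layout in this header: bit 0 is the global enable (D40_DREG_GCC_ENA) and, starting at D40_DREG_GCC_EVTGRP_POS, each event group contributes one source and one destination enable bit. A small standalone check of that arithmetic (the helper is illustrative; only the two resulting constants come from the diff):

```c
#include <stdio.h>

#define GCC_ENA         0x1
#define GCC_EVTGRP_POS  8

/* Global enable plus src+dst enable bits for 'groups' event groups. */
static unsigned int gcc_enable_all(unsigned int groups)
{
	return GCC_ENA | (((1u << (2 * groups)) - 1) << GCC_EVTGRP_POS);
}

int main(void)
{
	printf("4 groups: 0x%x\n", gcc_enable_all(4));  /* 0xff01, the old value  */
	printf("5 groups: 0x%x\n", gcc_enable_all(5));  /* 0x3ff01, the new value */
	return 0;
}
```

Note that the comment above the define still says four event groups, while 0x3ff01 covers five.
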
| @@ -148,14 +148,31 @@ | |||
| 148 | 148 | ||
| 149 | #define D40_DREG_LCPA 0x020 | 149 | #define D40_DREG_LCPA 0x020 |
| 150 | #define D40_DREG_LCLA 0x024 | 150 | #define D40_DREG_LCLA 0x024 |
| 151 | |||
| 152 | #define D40_DREG_SSEG1 0x030 | ||
| 153 | #define D40_DREG_SSEG2 0x034 | ||
| 154 | #define D40_DREG_SSEG3 0x038 | ||
| 155 | #define D40_DREG_SSEG4 0x03C | ||
| 156 | |||
| 157 | #define D40_DREG_SCEG1 0x040 | ||
| 158 | #define D40_DREG_SCEG2 0x044 | ||
| 159 | #define D40_DREG_SCEG3 0x048 | ||
| 160 | #define D40_DREG_SCEG4 0x04C | ||
| 161 | |||
| 151 | #define D40_DREG_ACTIVE 0x050 | 162 | #define D40_DREG_ACTIVE 0x050 |
| 152 | #define D40_DREG_ACTIVO 0x054 | 163 | #define D40_DREG_ACTIVO 0x054 |
| 153 | #define D40_DREG_FSEB1 0x058 | 164 | #define D40_DREG_CIDMOD 0x058 |
| 154 | #define D40_DREG_FSEB2 0x05C | 165 | #define D40_DREG_TCIDV 0x05C |
| 155 | #define D40_DREG_PCMIS 0x060 | 166 | #define D40_DREG_PCMIS 0x060 |
| 156 | #define D40_DREG_PCICR 0x064 | 167 | #define D40_DREG_PCICR 0x064 |
| 157 | #define D40_DREG_PCTIS 0x068 | 168 | #define D40_DREG_PCTIS 0x068 |
| 158 | #define D40_DREG_PCEIS 0x06C | 169 | #define D40_DREG_PCEIS 0x06C |
| 170 | |||
| 171 | #define D40_DREG_SPCMIS 0x070 | ||
| 172 | #define D40_DREG_SPCICR 0x074 | ||
| 173 | #define D40_DREG_SPCTIS 0x078 | ||
| 174 | #define D40_DREG_SPCEIS 0x07C | ||
| 175 | |||
| 159 | #define D40_DREG_LCMIS0 0x080 | 176 | #define D40_DREG_LCMIS0 0x080 |
| 160 | #define D40_DREG_LCMIS1 0x084 | 177 | #define D40_DREG_LCMIS1 0x084 |
| 161 | #define D40_DREG_LCMIS2 0x088 | 178 | #define D40_DREG_LCMIS2 0x088 |
| @@ -172,6 +189,33 @@ | |||
| 172 | #define D40_DREG_LCEIS1 0x0B4 | 189 | #define D40_DREG_LCEIS1 0x0B4 |
| 173 | #define D40_DREG_LCEIS2 0x0B8 | 190 | #define D40_DREG_LCEIS2 0x0B8 |
| 174 | #define D40_DREG_LCEIS3 0x0BC | 191 | #define D40_DREG_LCEIS3 0x0BC |
| 192 | |||
| 193 | #define D40_DREG_SLCMIS1 0x0C0 | ||
| 194 | #define D40_DREG_SLCMIS2 0x0C4 | ||
| 195 | #define D40_DREG_SLCMIS3 0x0C8 | ||
| 196 | #define D40_DREG_SLCMIS4 0x0CC | ||
| 197 | |||
| 198 | #define D40_DREG_SLCICR1 0x0D0 | ||
| 199 | #define D40_DREG_SLCICR2 0x0D4 | ||
| 200 | #define D40_DREG_SLCICR3 0x0D8 | ||
| 201 | #define D40_DREG_SLCICR4 0x0DC | ||
| 202 | |||
| 203 | #define D40_DREG_SLCTIS1 0x0E0 | ||
| 204 | #define D40_DREG_SLCTIS2 0x0E4 | ||
| 205 | #define D40_DREG_SLCTIS3 0x0E8 | ||
| 206 | #define D40_DREG_SLCTIS4 0x0EC | ||
| 207 | |||
| 208 | #define D40_DREG_SLCEIS1 0x0F0 | ||
| 209 | #define D40_DREG_SLCEIS2 0x0F4 | ||
| 210 | #define D40_DREG_SLCEIS3 0x0F8 | ||
| 211 | #define D40_DREG_SLCEIS4 0x0FC | ||
| 212 | |||
| 213 | #define D40_DREG_FSESS1 0x100 | ||
| 214 | #define D40_DREG_FSESS2 0x104 | ||
| 215 | |||
| 216 | #define D40_DREG_FSEBS1 0x108 | ||
| 217 | #define D40_DREG_FSEBS2 0x10C | ||
| 218 | |||
| 175 | #define D40_DREG_PSEG1 0x110 | 219 | #define D40_DREG_PSEG1 0x110 |
| 176 | #define D40_DREG_PSEG2 0x114 | 220 | #define D40_DREG_PSEG2 0x114 |
| 177 | #define D40_DREG_PSEG3 0x118 | 221 | #define D40_DREG_PSEG3 0x118 |
| @@ -188,6 +232,86 @@ | |||
| 188 | #define D40_DREG_RCEG2 0x144 | 232 | #define D40_DREG_RCEG2 0x144 |
| 189 | #define D40_DREG_RCEG3 0x148 | 233 | #define D40_DREG_RCEG3 0x148 |
| 190 | #define D40_DREG_RCEG4 0x14C | 234 | #define D40_DREG_RCEG4 0x14C |
| 235 | |||
| 236 | #define D40_DREG_PREFOT 0x15C | ||
| 237 | #define D40_DREG_EXTCFG 0x160 | ||
| 238 | |||
| 239 | #define D40_DREG_CPSEG1 0x200 | ||
| 240 | #define D40_DREG_CPSEG2 0x204 | ||
| 241 | #define D40_DREG_CPSEG3 0x208 | ||
| 242 | #define D40_DREG_CPSEG4 0x20C | ||
| 243 | #define D40_DREG_CPSEG5 0x210 | ||
| 244 | |||
| 245 | #define D40_DREG_CPCEG1 0x220 | ||
| 246 | #define D40_DREG_CPCEG2 0x224 | ||
| 247 | #define D40_DREG_CPCEG3 0x228 | ||
| 248 | #define D40_DREG_CPCEG4 0x22C | ||
| 249 | #define D40_DREG_CPCEG5 0x230 | ||
| 250 | |||
| 251 | #define D40_DREG_CRSEG1 0x240 | ||
| 252 | #define D40_DREG_CRSEG2 0x244 | ||
| 253 | #define D40_DREG_CRSEG3 0x248 | ||
| 254 | #define D40_DREG_CRSEG4 0x24C | ||
| 255 | #define D40_DREG_CRSEG5 0x250 | ||
| 256 | |||
| 257 | #define D40_DREG_CRCEG1 0x260 | ||
| 258 | #define D40_DREG_CRCEG2 0x264 | ||
| 259 | #define D40_DREG_CRCEG3 0x268 | ||
| 260 | #define D40_DREG_CRCEG4 0x26C | ||
| 261 | #define D40_DREG_CRCEG5 0x270 | ||
| 262 | |||
| 263 | #define D40_DREG_CFSESS1 0x280 | ||
| 264 | #define D40_DREG_CFSESS2 0x284 | ||
| 265 | #define D40_DREG_CFSESS3 0x288 | ||
| 266 | |||
| 267 | #define D40_DREG_CFSEBS1 0x290 | ||
| 268 | #define D40_DREG_CFSEBS2 0x294 | ||
| 269 | #define D40_DREG_CFSEBS3 0x298 | ||
| 270 | |||
| 271 | #define D40_DREG_CLCMIS1 0x300 | ||
| 272 | #define D40_DREG_CLCMIS2 0x304 | ||
| 273 | #define D40_DREG_CLCMIS3 0x308 | ||
| 274 | #define D40_DREG_CLCMIS4 0x30C | ||
| 275 | #define D40_DREG_CLCMIS5 0x310 | ||
| 276 | |||
| 277 | #define D40_DREG_CLCICR1 0x320 | ||
| 278 | #define D40_DREG_CLCICR2 0x324 | ||
| 279 | #define D40_DREG_CLCICR3 0x328 | ||
| 280 | #define D40_DREG_CLCICR4 0x32C | ||
| 281 | #define D40_DREG_CLCICR5 0x330 | ||
| 282 | |||
| 283 | #define D40_DREG_CLCTIS1 0x340 | ||
| 284 | #define D40_DREG_CLCTIS2 0x344 | ||
| 285 | #define D40_DREG_CLCTIS3 0x348 | ||
| 286 | #define D40_DREG_CLCTIS4 0x34C | ||
| 287 | #define D40_DREG_CLCTIS5 0x350 | ||
| 288 | |||
| 289 | #define D40_DREG_CLCEIS1 0x360 | ||
| 290 | #define D40_DREG_CLCEIS2 0x364 | ||
| 291 | #define D40_DREG_CLCEIS3 0x368 | ||
| 292 | #define D40_DREG_CLCEIS4 0x36C | ||
| 293 | #define D40_DREG_CLCEIS5 0x370 | ||
| 294 | |||
| 295 | #define D40_DREG_CPCMIS 0x380 | ||
| 296 | #define D40_DREG_CPCICR 0x384 | ||
| 297 | #define D40_DREG_CPCTIS 0x388 | ||
| 298 | #define D40_DREG_CPCEIS 0x38C | ||
| 299 | |||
| 300 | #define D40_DREG_SCCIDA1 0xE80 | ||
| 301 | #define D40_DREG_SCCIDA2 0xE90 | ||
| 302 | #define D40_DREG_SCCIDA3 0xEA0 | ||
| 303 | #define D40_DREG_SCCIDA4 0xEB0 | ||
| 304 | #define D40_DREG_SCCIDA5 0xEC0 | ||
| 305 | |||
| 306 | #define D40_DREG_SCCIDB1 0xE84 | ||
| 307 | #define D40_DREG_SCCIDB2 0xE94 | ||
| 308 | #define D40_DREG_SCCIDB3 0xEA4 | ||
| 309 | #define D40_DREG_SCCIDB4 0xEB4 | ||
| 310 | #define D40_DREG_SCCIDB5 0xEC4 | ||
| 311 | |||
| 312 | #define D40_DREG_PRSCCIDA 0xF80 | ||
| 313 | #define D40_DREG_PRSCCIDB 0xF84 | ||
| 314 | |||
| 191 | #define D40_DREG_STFU 0xFC8 | 315 | #define D40_DREG_STFU 0xFC8 |
| 192 | #define D40_DREG_ICFG 0xFCC | 316 | #define D40_DREG_ICFG 0xFCC |
| 193 | #define D40_DREG_PERIPHID0 0xFE0 | 317 | #define D40_DREG_PERIPHID0 0xFE0 |
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
index 9ff93b065686..4b781014b0a0 100644
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ b/include/linux/platform_data/dma-ste-dma40.h
| @@ -147,6 +147,16 @@ struct stedma40_chan_cfg { | |||
| 147 | * @memcpy_conf_log: default configuration of logical channel memcpy | 147 | * @memcpy_conf_log: default configuration of logical channel memcpy |
| 148 | * @disabled_channels: A vector, ending with -1, that marks physical channels | 148 | * @disabled_channels: A vector, ending with -1, that marks physical channels |
| 149 | * that are for different reasons not available for the driver. | 149 | * that are for different reasons not available for the driver. |
| 150 | * @soft_lli_chans: A vector that marks physical channels that will use LLI by SW | ||
| 151 | * which avoids HW bug that exists in some versions of the controller. | ||
| 152 | * SoftLLI introduces relink overhead that could impact performance for | ||
| 153 | * certain use cases. | ||
| 154 | * @num_of_soft_lli_chans: The number of channels that need to be configured | ||
| 155 | * to use SoftLLI. | ||
| 156 | * @use_esram_lcla: flag for mapping the lcla into esram region | ||
| 157 | * @num_of_phy_chans: The number of physical channels implemented in HW. | ||
| 158 | * 0 means reading the number of channels from DMA HW but this is only valid | ||
| 159 | * for 'multiple of 4' channels, like 8. | ||
| 150 | */ | 160 | */ |
| 151 | struct stedma40_platform_data { | 161 | struct stedma40_platform_data { |
| 152 | u32 dev_len; | 162 | u32 dev_len; |
| @@ -157,7 +167,10 @@ struct stedma40_platform_data { | |||
| 157 | struct stedma40_chan_cfg *memcpy_conf_phy; | 167 | struct stedma40_chan_cfg *memcpy_conf_phy; |
| 158 | struct stedma40_chan_cfg *memcpy_conf_log; | 168 | struct stedma40_chan_cfg *memcpy_conf_log; |
| 159 | int disabled_channels[STEDMA40_MAX_PHYS]; | 169 | int disabled_channels[STEDMA40_MAX_PHYS]; |
| 170 | int *soft_lli_chans; | ||
| 171 | int num_of_soft_lli_chans; | ||
| 160 | bool use_esram_lcla; | 172 | bool use_esram_lcla; |
| 173 | int num_of_phy_chans; | ||
| 161 | }; | 174 | }; |
| 162 | 175 | ||
| 163 | #ifdef CONFIG_STE_DMA40 | 176 | #ifdef CONFIG_STE_DMA40 |
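
For reference, a hypothetical board file filling in the new fields; the channel numbers and array contents are made up for illustration, and only the struct fields themselves come from the header above:

```c
#include <linux/kernel.h>
#include <linux/platform_data/dma-ste-dma40.h>

/* Hypothetical values: channels 8-11 forced to software LLI handling and
 * 14 physical channels declared instead of being read from the ICFG register. */
static int board_soft_lli_chans[] = { 8, 9, 10, 11 };

static struct stedma40_platform_data board_dma40_pdata = {
	.soft_lli_chans        = board_soft_lli_chans,
	.num_of_soft_lli_chans = ARRAY_SIZE(board_soft_lli_chans),
	.num_of_phy_chans      = 14,
	.use_esram_lcla        = true,
	/* dev_len, dev_rx/dev_tx and the memcpy configs are omitted in this sketch */
};
```
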
