Diffstat (limited to 'drivers')

 -rw-r--r--  drivers/dma/shdma.c     | 500
 -rw-r--r--  drivers/dma/shdma.h     |  26
 -rw-r--r--  drivers/serial/Kconfig  |   4
 -rw-r--r--  drivers/serial/sh-sci.c | 616

4 files changed, 898 insertions(+), 248 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b75ce8b84c46..5d17e09cb625 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
| @@ -24,8 +24,10 @@ | |||
| 24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
| 25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
| 26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
| 27 | #include <cpu/dma.h> | 27 | #include <linux/pm_runtime.h> |
| 28 | #include <asm/dma-sh.h> | 28 | |
| 29 | #include <asm/dmaengine.h> | ||
| 30 | |||
| 29 | #include "shdma.h" | 31 | #include "shdma.h" |
| 30 | 32 | ||
| 31 | /* DMA descriptor control */ | 33 | /* DMA descriptor control */ |
| @@ -38,30 +40,32 @@ enum sh_dmae_desc_status { | |||
| 38 | }; | 40 | }; |
| 39 | 41 | ||
| 40 | #define NR_DESCS_PER_CHANNEL 32 | 42 | #define NR_DESCS_PER_CHANNEL 32 |
| 41 | /* | 43 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
| 42 | * Define the default configuration for dual address memory-memory transfer. | 44 | #define LOG2_DEFAULT_XFER_SIZE 2 |
| 43 | * The 0x400 value represents auto-request, external->external. | ||
| 44 | * | ||
| 45 | * And this driver set 4byte burst mode. | ||
| 46 | * If you want to change mode, you need to change RS_DEFAULT of value. | ||
| 47 | * (ex 1byte burst mode -> (RS_DUAL & ~TS_32) | ||
| 48 | */ | ||
| 49 | #define RS_DEFAULT (RS_DUAL) | ||
| 50 | 45 | ||
| 51 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | 46 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ |
| 52 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; | 47 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; |
| 53 | 48 | ||
| 54 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 49 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
| 55 | 50 | ||
| 56 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | ||
| 57 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 51 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
| 58 | { | 52 | { |
| 59 | ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); | 53 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); |
| 60 | } | 54 | } |
| 61 | 55 | ||
| 62 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 56 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
| 63 | { | 57 | { |
| 64 | return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); | 58 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); |
| 59 | } | ||
| 60 | |||
| 61 | static u16 dmaor_read(struct sh_dmae_device *shdev) | ||
| 62 | { | ||
| 63 | return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); | ||
| 64 | } | ||
| 65 | |||
| 66 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | ||
| 67 | { | ||
| 68 | __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); | ||
| 65 | } | 69 | } |
| 66 | 70 | ||
| 67 | /* | 71 | /* |
| @@ -69,24 +73,23 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | |||
| 69 | * | 73 | * |
| 70 | SH7780 has two DMAOR registers | 74 | SH7780 has two DMAOR registers |
| 71 | */ | 75 | */ |
| 72 | static void sh_dmae_ctl_stop(int id) | 76 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) |
| 73 | { | 77 | { |
| 74 | unsigned short dmaor = dmaor_read_reg(id); | 78 | unsigned short dmaor = dmaor_read(shdev); |
| 75 | 79 | ||
| 76 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | 80 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); |
| 77 | dmaor_write_reg(id, dmaor); | ||
| 78 | } | 81 | } |
| 79 | 82 | ||
| 80 | static int sh_dmae_rst(int id) | 83 | static int sh_dmae_rst(struct sh_dmae_device *shdev) |
| 81 | { | 84 | { |
| 82 | unsigned short dmaor; | 85 | unsigned short dmaor; |
| 83 | 86 | ||
| 84 | sh_dmae_ctl_stop(id); | 87 | sh_dmae_ctl_stop(shdev); |
| 85 | dmaor = dmaor_read_reg(id) | DMAOR_INIT; | 88 | dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; |
| 86 | 89 | ||
| 87 | dmaor_write_reg(id, dmaor); | 90 | dmaor_write(shdev, dmaor); |
| 88 | if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) { | 91 | if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { |
| 89 | pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); | 92 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); |
| 90 | return -EINVAL; | 93 | return -EINVAL; |
| 91 | } | 94 | } |
| 92 | return 0; | 95 | return 0; |
| @@ -102,13 +105,36 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | |||
| 102 | return false; /* waiting */ | 105 | return false; /* waiting */ |
| 103 | } | 106 | } |
| 104 | 107 | ||
| 105 | static unsigned int ts_shift[] = TS_SHIFT; | 108 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) |
| 106 | static inline unsigned int calc_xmit_shift(u32 chcr) | ||
| 107 | { | 109 | { |
| 108 | int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | | 110 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
| 109 | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); | 111 | struct sh_dmae_device, common); |
| 112 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
| 113 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | ||
| 114 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | ||
| 115 | |||
| 116 | if (cnt >= pdata->ts_shift_num) | ||
| 117 | cnt = 0; | ||
| 110 | 118 | ||
| 111 | return ts_shift[cnt]; | 119 | return pdata->ts_shift[cnt]; |
| 120 | } | ||
| 121 | |||
| 122 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | ||
| 123 | { | ||
| 124 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | ||
| 125 | struct sh_dmae_device, common); | ||
| 126 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
| 127 | int i; | ||
| 128 | |||
| 129 | for (i = 0; i < pdata->ts_shift_num; i++) | ||
| 130 | if (pdata->ts_shift[i] == l2size) | ||
| 131 | break; | ||
| 132 | |||
| 133 | if (i == pdata->ts_shift_num) | ||
| 134 | i = 0; | ||
| 135 | |||
| 136 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | ||
| 137 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | ||
| 112 | } | 138 | } |
| 113 | 139 | ||
| 114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | 140 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
| @@ -136,8 +162,13 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) | |||
| 136 | 162 | ||
| 137 | static void dmae_init(struct sh_dmae_chan *sh_chan) | 163 | static void dmae_init(struct sh_dmae_chan *sh_chan) |
| 138 | { | 164 | { |
| 139 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | 165 | /* |
| 140 | sh_chan->xmit_shift = calc_xmit_shift(chcr); | 166 | * Default configuration for dual address memory-memory transfer. |
| 167 | * 0x400 represents auto-request. | ||
| 168 | */ | ||
| 169 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | ||
| 170 | LOG2_DEFAULT_XFER_SIZE); | ||
| 171 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | ||
| 141 | sh_dmae_writel(sh_chan, chcr, CHCR); | 172 | sh_dmae_writel(sh_chan, chcr, CHCR); |
| 142 | } | 173 | } |
| 143 | 174 | ||
| @@ -147,37 +178,26 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | |||
| 147 | if (dmae_is_busy(sh_chan)) | 178 | if (dmae_is_busy(sh_chan)) |
| 148 | return -EBUSY; | 179 | return -EBUSY; |
| 149 | 180 | ||
| 150 | sh_chan->xmit_shift = calc_xmit_shift(val); | 181 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); |
| 151 | sh_dmae_writel(sh_chan, val, CHCR); | 182 | sh_dmae_writel(sh_chan, val, CHCR); |
| 152 | 183 | ||
| 153 | return 0; | 184 | return 0; |
| 154 | } | 185 | } |
| 155 | 186 | ||
| 156 | #define DMARS_SHIFT 8 | ||
| 157 | #define DMARS_CHAN_MSK 0x01 | ||
| 158 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 187 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
| 159 | { | 188 | { |
| 160 | u32 addr; | 189 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
| 161 | int shift = 0; | 190 | struct sh_dmae_device, common); |
| 191 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
| 192 | struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | ||
| 193 | u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); | ||
| 194 | int shift = chan_pdata->dmars_bit; | ||
| 162 | 195 | ||
| 163 | if (dmae_is_busy(sh_chan)) | 196 | if (dmae_is_busy(sh_chan)) |
| 164 | return -EBUSY; | 197 | return -EBUSY; |
| 165 | 198 | ||
| 166 | if (sh_chan->id & DMARS_CHAN_MSK) | 199 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), |
| 167 | shift = DMARS_SHIFT; | 200 | addr); |
| 168 | |||
| 169 | if (sh_chan->id < 6) | ||
| 170 | /* DMA0RS0 - DMA0RS2 */ | ||
| 171 | addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; | ||
| 172 | #ifdef SH_DMARS_BASE1 | ||
| 173 | else if (sh_chan->id < 12) | ||
| 174 | /* DMA1RS0 - DMA1RS2 */ | ||
| 175 | addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; | ||
| 176 | #endif | ||
| 177 | else | ||
| 178 | return -EINVAL; | ||
| 179 | |||
| 180 | ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); | ||
| 181 | 201 | ||
| 182 | return 0; | 202 | return 0; |
| 183 | } | 203 | } |
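The rewritten dmae_set_dmars() above stops hard-coding DMARS register addresses: each channel's platform data now supplies the DMARS byte offset and the bit position of its 8-bit MID/RID field, and the 0xff00 >> shift mask preserves the other channel's byte during the read-modify-write. A hypothetical channel table (all values invented) showing the usual even/odd pairing within each 16-bit DMARS register:

	static const struct sh_dmae_channel dmac0_channels[] = {
		{ .offset = 0x00, .dmars = 0x00, .dmars_bit = 0 }, /* ch 0: low byte  */
		{ .offset = 0x10, .dmars = 0x00, .dmars_bit = 8 }, /* ch 1: high byte */
		{ .offset = 0x20, .dmars = 0x04, .dmars_bit = 0 }, /* ch 2: low byte  */
		{ .offset = 0x30, .dmars = 0x04, .dmars_bit = 8 }, /* ch 3: high byte */
	};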
| @@ -251,15 +271,15 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave( | |||
| 251 | struct dma_device *dma_dev = sh_chan->common.device; | 271 | struct dma_device *dma_dev = sh_chan->common.device; |
| 252 | struct sh_dmae_device *shdev = container_of(dma_dev, | 272 | struct sh_dmae_device *shdev = container_of(dma_dev, |
| 253 | struct sh_dmae_device, common); | 273 | struct sh_dmae_device, common); |
| 254 | struct sh_dmae_pdata *pdata = &shdev->pdata; | 274 | struct sh_dmae_pdata *pdata = shdev->pdata; |
| 255 | int i; | 275 | int i; |
| 256 | 276 | ||
| 257 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) | 277 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) |
| 258 | return NULL; | 278 | return NULL; |
| 259 | 279 | ||
| 260 | for (i = 0; i < pdata->config_num; i++) | 280 | for (i = 0; i < pdata->slave_num; i++) |
| 261 | if (pdata->config[i].slave_id == slave_id) | 281 | if (pdata->slave[i].slave_id == slave_id) |
| 262 | return pdata->config + i; | 282 | return pdata->slave + i; |
| 263 | 283 | ||
| 264 | return NULL; | 284 | return NULL; |
| 265 | } | 285 | } |
| @@ -270,6 +290,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
| 270 | struct sh_desc *desc; | 290 | struct sh_desc *desc; |
| 271 | struct sh_dmae_slave *param = chan->private; | 291 | struct sh_dmae_slave *param = chan->private; |
| 272 | 292 | ||
| 293 | pm_runtime_get_sync(sh_chan->dev); | ||
| 294 | |||
| 273 | /* | 295 | /* |
| 274 | * This relies on the guarantee from dmaengine that alloc_chan_resources | 296 | * This relies on the guarantee from dmaengine that alloc_chan_resources |
| 275 | * never runs concurrently with itself or free_chan_resources. | 297 | * never runs concurrently with itself or free_chan_resources. |
| @@ -288,9 +310,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
| 288 | 310 | ||
| 289 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 311 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
| 290 | dmae_set_chcr(sh_chan, cfg->chcr); | 312 | dmae_set_chcr(sh_chan, cfg->chcr); |
| 291 | } else { | 313 | } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { |
| 292 | if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) | 314 | dmae_init(sh_chan); |
| 293 | dmae_set_chcr(sh_chan, RS_DEFAULT); | ||
| 294 | } | 315 | } |
| 295 | 316 | ||
| 296 | spin_lock_bh(&sh_chan->desc_lock); | 317 | spin_lock_bh(&sh_chan->desc_lock); |
| @@ -312,6 +333,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
| 312 | } | 333 | } |
| 313 | spin_unlock_bh(&sh_chan->desc_lock); | 334 | spin_unlock_bh(&sh_chan->desc_lock); |
| 314 | 335 | ||
| 336 | if (!sh_chan->descs_allocated) | ||
| 337 | pm_runtime_put(sh_chan->dev); | ||
| 338 | |||
| 315 | return sh_chan->descs_allocated; | 339 | return sh_chan->descs_allocated; |
| 316 | } | 340 | } |
| 317 | 341 | ||
| @@ -323,6 +347,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
| 323 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 347 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
| 324 | struct sh_desc *desc, *_desc; | 348 | struct sh_desc *desc, *_desc; |
| 325 | LIST_HEAD(list); | 349 | LIST_HEAD(list); |
| 350 | int descs = sh_chan->descs_allocated; | ||
| 326 | 351 | ||
| 327 | dmae_halt(sh_chan); | 352 | dmae_halt(sh_chan); |
| 328 | 353 | ||
| @@ -343,6 +368,9 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
| 343 | 368 | ||
| 344 | spin_unlock_bh(&sh_chan->desc_lock); | 369 | spin_unlock_bh(&sh_chan->desc_lock); |
| 345 | 370 | ||
| 371 | if (descs > 0) | ||
| 372 | pm_runtime_put(sh_chan->dev); | ||
| 373 | |||
| 346 | list_for_each_entry_safe(desc, _desc, &list, node) | 374 | list_for_each_entry_safe(desc, _desc, &list, node) |
| 347 | kfree(desc); | 375 | kfree(desc); |
| 348 | } | 376 | } |
| @@ -559,6 +587,19 @@ static void sh_dmae_terminate_all(struct dma_chan *chan) | |||
| 559 | if (!chan) | 587 | if (!chan) |
| 560 | return; | 588 | return; |
| 561 | 589 | ||
| 590 | dmae_halt(sh_chan); | ||
| 591 | |||
| 592 | spin_lock_bh(&sh_chan->desc_lock); | ||
| 593 | if (!list_empty(&sh_chan->ld_queue)) { | ||
| 594 | /* Record partial transfer */ | ||
| 595 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | ||
| 596 | struct sh_desc, node); | ||
| 597 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
| 598 | sh_chan->xmit_shift; | ||
| 599 | |||
| 600 | } | ||
| 601 | spin_unlock_bh(&sh_chan->desc_lock); | ||
| 602 | |||
| 562 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 603 | sh_dmae_chan_ld_cleanup(sh_chan, true); |
| 563 | } | 604 | } |
| 564 | 605 | ||
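A worked example of the partial-transfer bookkeeping added to sh_dmae_terminate_all() above, with invented numbers: TCR counts down in transfer units of 2^xmit_shift bytes, so if a descriptor was programmed with hw.tcr = 0x100 units, TCR reads back 0x40 units remaining, and xmit_shift = 2 (4-byte units), then partial = (0x100 - 0x40) << 2 = 0x300, i.e. 768 bytes actually moved before the abort. The sh-sci.c changes below consume exactly this field in work_fn_rx() after terminating an incomplete Rx DMA.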
| @@ -661,7 +702,7 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | |||
| 661 | 702 | ||
| 662 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 703 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) |
| 663 | { | 704 | { |
| 664 | struct sh_desc *sd; | 705 | struct sh_desc *desc; |
| 665 | 706 | ||
| 666 | spin_lock_bh(&sh_chan->desc_lock); | 707 | spin_lock_bh(&sh_chan->desc_lock); |
| 667 | /* DMA work check */ | 708 | /* DMA work check */ |
| @@ -671,10 +712,13 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | |||
| 671 | } | 712 | } |
| 672 | 713 | ||
| 673 | /* Find the first not transferred descriptor */ | 714 | ||
| 674 | list_for_each_entry(sd, &sh_chan->ld_queue, node) | 715 | list_for_each_entry(desc, &sh_chan->ld_queue, node) |
| 675 | if (sd->mark == DESC_SUBMITTED) { | 716 | if (desc->mark == DESC_SUBMITTED) { |
| 717 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
| 718 | desc->async_tx.cookie, sh_chan->id, | ||
| 719 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); | ||
| 676 | /* Get the ld start address from ld_queue */ | 720 | /* Get the ld start address from ld_queue */ |
| 677 | dmae_set_reg(sh_chan, &sd->hw); | 721 | dmae_set_reg(sh_chan, &desc->hw); |
| 678 | dmae_start(sh_chan); | 722 | dmae_start(sh_chan); |
| 679 | break; | 723 | break; |
| 680 | } | 724 | } |
| @@ -696,6 +740,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, | |||
| 696 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 740 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
| 697 | dma_cookie_t last_used; | 741 | dma_cookie_t last_used; |
| 698 | dma_cookie_t last_complete; | 742 | dma_cookie_t last_complete; |
| 743 | enum dma_status status; | ||
| 699 | 744 | ||
| 700 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 745 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
| 701 | 746 | ||
| @@ -709,7 +754,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, | |||
| 709 | if (used) | 754 | if (used) |
| 710 | *used = last_used; | 755 | *used = last_used; |
| 711 | 756 | ||
| 712 | return dma_async_is_complete(cookie, last_complete, last_used); | 757 | spin_lock_bh(&sh_chan->desc_lock); |
| 758 | |||
| 759 | status = dma_async_is_complete(cookie, last_complete, last_used); | ||
| 760 | |||
| 761 | /* | ||
| 762 | * If we don't find cookie on the queue, it has been aborted and we have | ||
| 763 | * to report error | ||
| 764 | */ | ||
| 765 | if (status != DMA_SUCCESS) { | ||
| 766 | struct sh_desc *desc; | ||
| 767 | status = DMA_ERROR; | ||
| 768 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
| 769 | if (desc->cookie == cookie) { | ||
| 770 | status = DMA_IN_PROGRESS; | ||
| 771 | break; | ||
| 772 | } | ||
| 773 | } | ||
| 774 | |||
| 775 | spin_unlock_bh(&sh_chan->desc_lock); | ||
| 776 | |||
| 777 | return status; | ||
| 713 | } | 778 | } |
| 714 | 779 | ||
| 715 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | 780 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) |
| @@ -732,40 +797,32 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) | |||
| 732 | #if defined(CONFIG_CPU_SH4) | 797 | #if defined(CONFIG_CPU_SH4) |
| 733 | static irqreturn_t sh_dmae_err(int irq, void *data) | 798 | static irqreturn_t sh_dmae_err(int irq, void *data) |
| 734 | { | 799 | { |
| 735 | int err = 0; | ||
| 736 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | 800 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; |
| 801 | int i; | ||
| 737 | 802 | ||
| 738 | /* IRQ Multi */ | 803 | /* halt the dma controller */ |
| 739 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 804 | sh_dmae_ctl_stop(shdev); |
| 740 | int __maybe_unused cnt = 0; | 805 | |
| 741 | switch (irq) { | 806 | /* We cannot detect which channel caused the error, have to reset all */ |
| 742 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 807 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { |
| 743 | case DMTE6_IRQ: | 808 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
| 744 | cnt++; | 809 | if (sh_chan) { |
| 745 | #endif | 810 | struct sh_desc *desc; |
| 746 | case DMTE0_IRQ: | 811 | /* Stop the channel */ |
| 747 | if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { | 812 | dmae_halt(sh_chan); |
| 748 | disable_irq(irq); | 813 | /* Complete all */ |
| 749 | return IRQ_HANDLED; | 814 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
| 815 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
| 816 | desc->mark = DESC_IDLE; | ||
| 817 | if (tx->callback) | ||
| 818 | tx->callback(tx->callback_param); | ||
| 750 | } | 819 | } |
| 751 | default: | 820 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); |
| 752 | return IRQ_NONE; | ||
| 753 | } | 821 | } |
| 754 | } else { | ||
| 755 | /* reset dma controller */ | ||
| 756 | err = sh_dmae_rst(0); | ||
| 757 | if (err) | ||
| 758 | return err; | ||
| 759 | #ifdef SH_DMAC_BASE1 | ||
| 760 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
| 761 | err = sh_dmae_rst(1); | ||
| 762 | if (err) | ||
| 763 | return err; | ||
| 764 | } | ||
| 765 | #endif | ||
| 766 | disable_irq(irq); | ||
| 767 | return IRQ_HANDLED; | ||
| 768 | } | 822 | } |
| 823 | sh_dmae_rst(shdev); | ||
| 824 | |||
| 825 | return IRQ_HANDLED; | ||
| 769 | } | 826 | } |
| 770 | #endif | 827 | #endif |
| 771 | 828 | ||
| @@ -796,19 +853,12 @@ static void dmae_do_tasklet(unsigned long data) | |||
| 796 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 853 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
| 797 | } | 854 | } |
| 798 | 855 | ||
| 799 | static unsigned int get_dmae_irq(unsigned int id) | 856 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
| 800 | { | 857 | int irq, unsigned long flags) |
| 801 | unsigned int irq = 0; | ||
| 802 | if (id < ARRAY_SIZE(dmte_irq_map)) | ||
| 803 | irq = dmte_irq_map[id]; | ||
| 804 | return irq; | ||
| 805 | } | ||
| 806 | |||
| 807 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | ||
| 808 | { | 858 | { |
| 809 | int err; | 859 | int err; |
| 810 | unsigned int irq = get_dmae_irq(id); | 860 | struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; |
| 811 | unsigned long irqflags = IRQF_DISABLED; | 861 | struct platform_device *pdev = to_platform_device(shdev->common.dev); |
| 812 | struct sh_dmae_chan *new_sh_chan; | 862 | struct sh_dmae_chan *new_sh_chan; |
| 813 | 863 | ||
| 814 | /* alloc channel */ | 864 | /* alloc channel */ |
| @@ -819,8 +869,13 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
| 819 | return -ENOMEM; | 869 | return -ENOMEM; |
| 820 | } | 870 | } |
| 821 | 871 | ||
| 872 | /* copy struct dma_device */ | ||
| 873 | new_sh_chan->common.device = &shdev->common; | ||
| 874 | |||
| 822 | new_sh_chan->dev = shdev->common.dev; | 875 | new_sh_chan->dev = shdev->common.dev; |
| 823 | new_sh_chan->id = id; | 876 | new_sh_chan->id = id; |
| 877 | new_sh_chan->irq = irq; | ||
| 878 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
| 824 | 879 | ||
| 825 | /* Init DMA tasklet */ | 880 | /* Init DMA tasklet */ |
| 826 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | 881 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, |
| @@ -835,29 +890,20 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
| 835 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | 890 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); |
| 836 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | 891 | INIT_LIST_HEAD(&new_sh_chan->ld_free); |
| 837 | 892 | ||
| 838 | /* copy struct dma_device */ | ||
| 839 | new_sh_chan->common.device = &shdev->common; | ||
| 840 | |||
| 841 | /* Add the channel to DMA device channel list */ | 893 | /* Add the channel to DMA device channel list */ |
| 842 | list_add_tail(&new_sh_chan->common.device_node, | 894 | list_add_tail(&new_sh_chan->common.device_node, |
| 843 | &shdev->common.channels); | 895 | &shdev->common.channels); |
| 844 | shdev->common.chancnt++; | 896 | shdev->common.chancnt++; |
| 845 | 897 | ||
| 846 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 898 | if (pdev->id >= 0) |
| 847 | irqflags = IRQF_SHARED; | 899 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
| 848 | #if defined(DMTE6_IRQ) | 900 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); |
| 849 | if (irq >= DMTE6_IRQ) | 901 | else |
| 850 | irq = DMTE6_IRQ; | 902 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
| 851 | else | 903 | "sh-dma%d", new_sh_chan->id); |
| 852 | #endif | ||
| 853 | irq = DMTE0_IRQ; | ||
| 854 | } | ||
| 855 | |||
| 856 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | ||
| 857 | "sh-dmae%d", new_sh_chan->id); | ||
| 858 | 904 | ||
| 859 | /* set up channel irq */ | 905 | /* set up channel irq */ |
| 860 | err = request_irq(irq, &sh_dmae_interrupt, irqflags, | 906 | err = request_irq(irq, &sh_dmae_interrupt, flags, |
| 861 | new_sh_chan->dev_id, new_sh_chan); | 907 | new_sh_chan->dev_id, new_sh_chan); |
| 862 | if (err) { | 908 | if (err) { |
| 863 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | 909 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " |
| @@ -881,12 +927,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
| 881 | 927 | ||
| 882 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | 928 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { |
| 883 | if (shdev->chan[i]) { | 929 | if (shdev->chan[i]) { |
| 884 | struct sh_dmae_chan *shchan = shdev->chan[i]; | 930 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
| 885 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) | ||
| 886 | free_irq(dmte_irq_map[i], shchan); | ||
| 887 | 931 | ||
| 888 | list_del(&shchan->common.device_node); | 932 | free_irq(sh_chan->irq, sh_chan); |
| 889 | kfree(shchan); | 933 | |
| 934 | list_del(&sh_chan->common.device_node); | ||
| 935 | kfree(sh_chan); | ||
| 890 | shdev->chan[i] = NULL; | 936 | shdev->chan[i] = NULL; |
| 891 | } | 937 | } |
| 892 | } | 938 | } |
| @@ -895,47 +941,84 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
| 895 | 941 | ||
| 896 | static int __init sh_dmae_probe(struct platform_device *pdev) | 942 | static int __init sh_dmae_probe(struct platform_device *pdev) |
| 897 | { | 943 | { |
| 898 | int err = 0, cnt, ecnt; | 944 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; |
| 899 | unsigned long irqflags = IRQF_DISABLED; | 945 | unsigned long irqflags = IRQF_DISABLED, |
| 900 | #if defined(CONFIG_CPU_SH4) | 946 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; |
| 901 | int eirq[] = { DMAE0_IRQ, | 947 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; |
| 902 | #if defined(DMAE1_IRQ) | 948 | int err, i, irq_cnt = 0, irqres = 0; |
| 903 | DMAE1_IRQ | ||
| 904 | #endif | ||
| 905 | }; | ||
| 906 | #endif | ||
| 907 | struct sh_dmae_device *shdev; | 949 | struct sh_dmae_device *shdev; |
| 950 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | ||
| 908 | 951 | ||
| 909 | /* get platform data */ | 952 | /* get platform data */ |
| 910 | if (!pdev->dev.platform_data) | 953 | if (!pdata || !pdata->channel_num) |
| 911 | return -ENODEV; | 954 | return -ENODEV; |
| 912 | 955 | ||
| 956 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 957 | /* DMARS area is optional; if absent, this controller cannot do slave DMA */ | ||
| 958 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 959 | /* | ||
| 960 | * IRQ resources: | ||
| 961 | * 1. there always must be at least one IRQ IO-resource. On SH4 it is | ||
| 962 | * the error IRQ, in which case it is the only IRQ in this resource: | ||
| 963 | * start == end. If it is the only IRQ resource, all channels also | ||
| 964 | * use the same IRQ. | ||
| 965 | * 2. DMA channel IRQ resources can be specified one per resource or in | ||
| 966 | * ranges (start != end) | ||
| 967 | * 3. iff all events (channels and, optionally, error) on this | ||
| 968 | * controller use the same IRQ, only one IRQ resource can be | ||
| 969 | * specified, otherwise there must be one IRQ per channel, even if | ||
| 970 | * some of them are equal | ||
| 971 | * 4. if all IRQs on this controller are equal or if some specific IRQs | ||
| 972 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | ||
| 973 | * requested with the IRQF_SHARED flag | ||
| 974 | */ | ||
| 975 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
| 976 | if (!chan || !errirq_res) | ||
| 977 | return -ENODEV; | ||
| 978 | |||
| 979 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | ||
| 980 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | ||
| 981 | return -EBUSY; | ||
| 982 | } | ||
| 983 | |||
| 984 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
| 985 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
| 986 | err = -EBUSY; | ||
| 987 | goto ermrdmars; | ||
| 988 | } | ||
| 989 | |||
| 990 | err = -ENOMEM; | ||
| 913 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | 991 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); |
| 914 | if (!shdev) { | 992 | if (!shdev) { |
| 915 | dev_err(&pdev->dev, "No enough memory\n"); | 993 | dev_err(&pdev->dev, "Not enough memory\n"); |
| 916 | return -ENOMEM; | 994 | goto ealloc; |
| 995 | } | ||
| 996 | |||
| 997 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | ||
| 998 | if (!shdev->chan_reg) | ||
| 999 | goto emapchan; | ||
| 1000 | if (dmars) { | ||
| 1001 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | ||
| 1002 | if (!shdev->dmars) | ||
| 1003 | goto emapdmars; | ||
| 917 | } | 1004 | } |
| 918 | 1005 | ||
| 919 | /* platform data */ | 1006 | /* platform data */ |
| 920 | memcpy(&shdev->pdata, pdev->dev.platform_data, | 1007 | shdev->pdata = pdata; |
| 921 | sizeof(struct sh_dmae_pdata)); | 1008 | |
| 1009 | pm_runtime_enable(&pdev->dev); | ||
| 1010 | pm_runtime_get_sync(&pdev->dev); | ||
| 922 | 1011 | ||
| 923 | /* reset dma controller */ | 1012 | /* reset dma controller */ |
| 924 | err = sh_dmae_rst(0); | 1013 | err = sh_dmae_rst(shdev); |
| 925 | if (err) | 1014 | if (err) |
| 926 | goto rst_err; | 1015 | goto rst_err; |
| 927 | 1016 | ||
| 928 | /* SH7780/85/23 has DMAOR1 */ | ||
| 929 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
| 930 | err = sh_dmae_rst(1); | ||
| 931 | if (err) | ||
| 932 | goto rst_err; | ||
| 933 | } | ||
| 934 | |||
| 935 | INIT_LIST_HEAD(&shdev->common.channels); | 1017 | INIT_LIST_HEAD(&shdev->common.channels); |
| 936 | 1018 | ||
| 937 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 1019 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
| 938 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | 1020 | if (dmars) |
| 1021 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
| 939 | 1022 | ||
| 940 | shdev->common.device_alloc_chan_resources | 1023 | shdev->common.device_alloc_chan_resources |
| 941 | = sh_dmae_alloc_chan_resources; | 1024 | = sh_dmae_alloc_chan_resources; |
| @@ -950,37 +1033,72 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
| 950 | 1033 | ||
| 951 | shdev->common.dev = &pdev->dev; | 1034 | shdev->common.dev = &pdev->dev; |
| 952 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 1035 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
| 953 | shdev->common.copy_align = 5; | 1036 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; |
| 954 | 1037 | ||
| 955 | #if defined(CONFIG_CPU_SH4) | 1038 | #if defined(CONFIG_CPU_SH4) |
| 956 | /* Non Mix IRQ mode SH7722/SH7730 etc... */ | 1039 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
| 957 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 1040 | |
| 1041 | if (!chanirq_res) | ||
| 1042 | chanirq_res = errirq_res; | ||
| 1043 | else | ||
| 1044 | irqres++; | ||
| 1045 | |||
| 1046 | if (chanirq_res == errirq_res || | ||
| 1047 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | ||
| 958 | irqflags = IRQF_SHARED; | 1048 | irqflags = IRQF_SHARED; |
| 959 | eirq[0] = DMTE0_IRQ; | 1049 | |
| 960 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 1050 | errirq = errirq_res->start; |
| 961 | eirq[1] = DMTE6_IRQ; | 1051 | |
| 962 | #endif | 1052 | err = request_irq(errirq, sh_dmae_err, irqflags, |
| 1053 | "DMAC Address Error", shdev); | ||
| 1054 | if (err) { | ||
| 1055 | dev_err(&pdev->dev, | ||
| 1056 | "DMA failed requesting irq #%d, error %d\n", | ||
| 1057 | errirq, err); | ||
| 1058 | goto eirq_err; | ||
| 963 | } | 1059 | } |
| 964 | 1060 | ||
| 965 | for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { | 1061 | #else |
| 966 | err = request_irq(eirq[ecnt], sh_dmae_err, irqflags, | 1062 | chanirq_res = errirq_res; |
| 967 | "DMAC Address Error", shdev); | 1063 | #endif /* CONFIG_CPU_SH4 */ |
| 968 | if (err) { | 1064 | |
| 969 | dev_err(&pdev->dev, "DMA device request_irq" | 1065 | if (chanirq_res->start == chanirq_res->end && |
| 970 | "error (irq %d) with return %d\n", | 1066 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { |
| 971 | eirq[ecnt], err); | 1067 | /* Special case - all multiplexed */ |
| 972 | goto eirq_err; | 1068 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { |
| 1069 | chan_irq[irq_cnt] = chanirq_res->start; | ||
| 1070 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
| 973 | } | 1071 | } |
| 1072 | } else { | ||
| 1073 | do { | ||
| 1074 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | ||
| 1075 | if ((errirq_res->flags & IORESOURCE_BITS) == | ||
| 1076 | IORESOURCE_IRQ_SHAREABLE) | ||
| 1077 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
| 1078 | else | ||
| 1079 | chan_flag[irq_cnt] = IRQF_DISABLED; | ||
| 1080 | dev_dbg(&pdev->dev, | ||
| 1081 | "Found IRQ %d for channel %d\n", | ||
| 1082 | i, irq_cnt); | ||
| 1083 | chan_irq[irq_cnt++] = i; | ||
| 1084 | } | ||
| 1085 | chanirq_res = platform_get_resource(pdev, | ||
| 1086 | IORESOURCE_IRQ, ++irqres); | ||
| 1087 | } while (irq_cnt < pdata->channel_num && chanirq_res); | ||
| 974 | } | 1088 | } |
| 975 | #endif /* CONFIG_CPU_SH4 */ | 1089 | |
| 1090 | if (irq_cnt < pdata->channel_num) | ||
| 1091 | goto eirqres; | ||
| 976 | 1092 | ||
| 977 | /* Create DMA Channel */ | 1093 | /* Create DMA Channel */ |
| 978 | for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { | 1094 | for (i = 0; i < pdata->channel_num; i++) { |
| 979 | err = sh_dmae_chan_probe(shdev, cnt); | 1095 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); |
| 980 | if (err) | 1096 | if (err) |
| 981 | goto chan_probe_err; | 1097 | goto chan_probe_err; |
| 982 | } | 1098 | } |
| 983 | 1099 | ||
| 1100 | pm_runtime_put(&pdev->dev); | ||
| 1101 | |||
| 984 | platform_set_drvdata(pdev, shdev); | 1102 | platform_set_drvdata(pdev, shdev); |
| 985 | dma_async_device_register(&shdev->common); | 1103 | dma_async_device_register(&shdev->common); |
| 986 | 1104 | ||
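To make the four-case IRQ scheme documented at the top of sh_dmae_probe() concrete, here is a hypothetical board definition (addresses and IRQ numbers invented, not taken from this patch) exercising case 1 (a dedicated error IRQ, start == end) and case 2 (channel IRQs given as a range):

	static struct resource dmac0_resources[] = {
		{	/* channel register block */
			.start	= 0xfe008020,
			.end	= 0xfe00828f,
			.flags	= IORESOURCE_MEM,
		}, {	/* DMARS: optional, enables slave DMA */
			.start	= 0xfe009000,
			.end	= 0xfe00900b,
			.flags	= IORESOURCE_MEM,
		}, {	/* error IRQ: start == end */
			.start	= 78,
			.end	= 78,
			.flags	= IORESOURCE_IRQ,
		}, {	/* six channel IRQs as one range */
			.start	= 32,
			.end	= 37,
			.flags	= IORESOURCE_IRQ,
		},
	};

If only the first IRQ resource were present, the probe loop above would fall into the "all multiplexed" special case and request that one IRQ for every channel with IRQF_SHARED (case 3).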
| @@ -988,13 +1106,24 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
| 988 | 1106 | ||
| 989 | chan_probe_err: | 1107 | chan_probe_err: |
| 990 | sh_dmae_chan_remove(shdev); | 1108 | sh_dmae_chan_remove(shdev); |
| 991 | 1109 | eirqres: | |
| 1110 | #if defined(CONFIG_CPU_SH4) | ||
| 1111 | free_irq(errirq, shdev); | ||
| 992 | eirq_err: | 1112 | eirq_err: |
| 993 | for (ecnt-- ; ecnt >= 0; ecnt--) | 1113 | #endif |
| 994 | free_irq(eirq[ecnt], shdev); | ||
| 995 | |||
| 996 | rst_err: | 1114 | rst_err: |
| 1115 | pm_runtime_put(&pdev->dev); | ||
| 1116 | if (dmars) | ||
| 1117 | iounmap(shdev->dmars); | ||
| 1118 | emapdmars: | ||
| 1119 | iounmap(shdev->chan_reg); | ||
| 1120 | emapchan: | ||
| 997 | kfree(shdev); | 1121 | kfree(shdev); |
| 1122 | ealloc: | ||
| 1123 | if (dmars) | ||
| 1124 | release_mem_region(dmars->start, resource_size(dmars)); | ||
| 1125 | ermrdmars: | ||
| 1126 | release_mem_region(chan->start, resource_size(chan)); | ||
| 998 | 1127 | ||
| 999 | return err; | 1128 | return err; |
| 1000 | } | 1129 | } |
| @@ -1002,36 +1131,39 @@ rst_err: | |||
| 1002 | static int __exit sh_dmae_remove(struct platform_device *pdev) | 1131 | static int __exit sh_dmae_remove(struct platform_device *pdev) |
| 1003 | { | 1132 | { |
| 1004 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1133 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
| 1134 | struct resource *res; | ||
| 1135 | int errirq = platform_get_irq(pdev, 0); | ||
| 1005 | 1136 | ||
| 1006 | dma_async_device_unregister(&shdev->common); | 1137 | dma_async_device_unregister(&shdev->common); |
| 1007 | 1138 | ||
| 1008 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 1139 | if (errirq > 0) |
| 1009 | free_irq(DMTE0_IRQ, shdev); | 1140 | free_irq(errirq, shdev); |
| 1010 | #if defined(DMTE6_IRQ) | ||
| 1011 | free_irq(DMTE6_IRQ, shdev); | ||
| 1012 | #endif | ||
| 1013 | } | ||
| 1014 | 1141 | ||
| 1015 | /* channel data remove */ | 1142 | /* channel data remove */ |
| 1016 | sh_dmae_chan_remove(shdev); | 1143 | sh_dmae_chan_remove(shdev); |
| 1017 | 1144 | ||
| 1018 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { | 1145 | pm_runtime_disable(&pdev->dev); |
| 1019 | free_irq(DMAE0_IRQ, shdev); | 1146 | |
| 1020 | #if defined(DMAE1_IRQ) | 1147 | if (shdev->dmars) |
| 1021 | free_irq(DMAE1_IRQ, shdev); | 1148 | iounmap(shdev->dmars); |
| 1022 | #endif | 1149 | iounmap(shdev->chan_reg); |
| 1023 | } | 1150 | |
| 1024 | kfree(shdev); | 1151 | kfree(shdev); |
| 1025 | 1152 | ||
| 1153 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1154 | if (res) | ||
| 1155 | release_mem_region(res->start, resource_size(res)); | ||
| 1156 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1157 | if (res) | ||
| 1158 | release_mem_region(res->start, resource_size(res)); | ||
| 1159 | |||
| 1026 | return 0; | 1160 | return 0; |
| 1027 | } | 1161 | } |
| 1028 | 1162 | ||
| 1029 | static void sh_dmae_shutdown(struct platform_device *pdev) | 1163 | static void sh_dmae_shutdown(struct platform_device *pdev) |
| 1030 | { | 1164 | { |
| 1031 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1165 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
| 1032 | sh_dmae_ctl_stop(0); | 1166 | sh_dmae_ctl_stop(shdev); |
| 1033 | if (shdev->pdata.mode & SHDMA_DMAOR1) | ||
| 1034 | sh_dmae_ctl_stop(1); | ||
| 1035 | } | 1167 | } |
| 1036 | 1168 | ||
| 1037 | static struct platform_driver sh_dmae_driver = { | 1169 | static struct platform_driver sh_dmae_driver = { |
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 7e227f3c87c4..153609a1e96c 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
| @@ -17,23 +17,9 @@ | |||
| 17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
| 18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
| 19 | 19 | ||
| 20 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | 20 | #include <asm/dmaengine.h> |
| 21 | |||
| 22 | struct sh_dmae_regs { | ||
| 23 | u32 sar; /* SAR / source address */ | ||
| 24 | u32 dar; /* DAR / destination address */ | ||
| 25 | u32 tcr; /* TCR / transfer count */ | ||
| 26 | }; | ||
| 27 | 21 | ||
| 28 | struct sh_desc { | 22 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ |
| 29 | struct sh_dmae_regs hw; | ||
| 30 | struct list_head node; | ||
| 31 | struct dma_async_tx_descriptor async_tx; | ||
| 32 | enum dma_data_direction direction; | ||
| 33 | dma_cookie_t cookie; | ||
| 34 | int chunks; | ||
| 35 | int mark; | ||
| 36 | }; | ||
| 37 | 23 | ||
| 38 | struct device; | 24 | struct device; |
| 39 | 25 | ||
| @@ -47,14 +33,18 @@ struct sh_dmae_chan { | |||
| 47 | struct tasklet_struct tasklet; /* Tasklet */ | 33 | struct tasklet_struct tasklet; /* Tasklet */ |
| 48 | int descs_allocated; /* desc count */ | 34 | int descs_allocated; /* desc count */ |
| 49 | int xmit_shift; /* log_2(bytes_per_xfer) */ | 35 | int xmit_shift; /* log_2(bytes_per_xfer) */ |
| 36 | int irq; | ||
| 50 | int id; /* Raw id of this channel */ | 37 | int id; /* Raw id of this channel */ |
| 38 | u32 __iomem *base; | ||
| 51 | char dev_id[16]; /* unique name per DMAC of channel */ | 39 | char dev_id[16]; /* unique name per DMAC of channel */ |
| 52 | }; | 40 | }; |
| 53 | 41 | ||
| 54 | struct sh_dmae_device { | 42 | struct sh_dmae_device { |
| 55 | struct dma_device common; | 43 | struct dma_device common; |
| 56 | struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; | 44 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; |
| 57 | struct sh_dmae_pdata pdata; | 45 | struct sh_dmae_pdata *pdata; |
| 46 | u32 __iomem *chan_reg; | ||
| 47 | u16 __iomem *dmars; | ||
| 58 | }; | 48 | }; |
| 59 | 49 | ||
| 60 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) | 50 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 746e07033dce..d6ff73395623 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
| @@ -1009,6 +1009,10 @@ config SERIAL_SH_SCI_CONSOLE | |||
| 1009 | depends on SERIAL_SH_SCI=y | 1009 | depends on SERIAL_SH_SCI=y |
| 1010 | select SERIAL_CORE_CONSOLE | 1010 | select SERIAL_CORE_CONSOLE |
| 1011 | 1011 | ||
| 1012 | config SERIAL_SH_SCI_DMA | ||
| 1013 | bool "DMA support" | ||
| 1014 | depends on SERIAL_SH_SCI && SH_DMAE && EXPERIMENTAL | ||
| 1015 | |||
| 1012 | config SERIAL_PNX8XXX | 1016 | config SERIAL_PNX8XXX |
| 1013 | bool "Enable PNX8XXX SoCs' UART Support" | 1017 | bool "Enable PNX8XXX SoCs' UART Support" |
| 1014 | depends on MIPS && (SOC_PNX8550 || SOC_PNX833X) | 1018 | depends on MIPS && (SOC_PNX8550 || SOC_PNX833X) |
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 42f3333c4ad0..980f39449ee5 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
| @@ -48,6 +48,9 @@ | |||
| 48 | #include <linux/ctype.h> | 48 | #include <linux/ctype.h> |
| 49 | #include <linux/err.h> | 49 | #include <linux/err.h> |
| 50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
| 51 | #include <linux/dmaengine.h> | ||
| 52 | #include <linux/scatterlist.h> | ||
| 53 | #include <linux/timer.h> | ||
| 51 | 54 | ||
| 52 | #ifdef CONFIG_SUPERH | 55 | #ifdef CONFIG_SUPERH |
| 53 | #include <asm/sh_bios.h> | 56 | #include <asm/sh_bios.h> |
| @@ -84,6 +87,27 @@ struct sci_port { | |||
| 84 | struct clk *dclk; | 87 | struct clk *dclk; |
| 85 | 88 | ||
| 86 | struct list_head node; | 89 | struct list_head node; |
| 90 | struct dma_chan *chan_tx; | ||
| 91 | struct dma_chan *chan_rx; | ||
| 92 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 93 | struct device *dma_dev; | ||
| 94 | enum sh_dmae_slave_chan_id slave_tx; | ||
| 95 | enum sh_dmae_slave_chan_id slave_rx; | ||
| 96 | struct dma_async_tx_descriptor *desc_tx; | ||
| 97 | struct dma_async_tx_descriptor *desc_rx[2]; | ||
| 98 | dma_cookie_t cookie_tx; | ||
| 99 | dma_cookie_t cookie_rx[2]; | ||
| 100 | dma_cookie_t active_rx; | ||
| 101 | struct scatterlist sg_tx; | ||
| 102 | unsigned int sg_len_tx; | ||
| 103 | struct scatterlist sg_rx[2]; | ||
| 104 | size_t buf_len_rx; | ||
| 105 | struct sh_dmae_slave param_tx; | ||
| 106 | struct sh_dmae_slave param_rx; | ||
| 107 | struct work_struct work_tx; | ||
| 108 | struct work_struct work_rx; | ||
| 109 | struct timer_list rx_timer; | ||
| 110 | #endif | ||
| 87 | }; | 111 | }; |
| 88 | 112 | ||
| 89 | struct sh_sci_priv { | 113 | struct sh_sci_priv { |
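The paired Rx fields added to struct sci_port above (sg_rx[2], desc_rx[2], cookie_rx[2] and active_rx) implement a two-buffer ping-pong: the DMA engine fills one half while the driver drains the other. As used by sci_dma_rx_push() and work_fn_rx() further down, active_rx holds the cookie of the buffer whose completion is handled next; once that half is drained it is resubmitted and active_rx flips to the other cookie:

	/* index of the buffer matching the active cookie */
	int active = (s->active_rx == s->cookie_rx[0]) ? 0 : 1;
	/* ... drain sg_rx[active], resubmit it, then flip: */
	s->active_rx = s->cookie_rx[!active];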
| @@ -269,29 +293,44 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | |||
| 269 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 293 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 270 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 294 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 271 | defined(CONFIG_CPU_SUBTYPE_SH7786) | 295 | defined(CONFIG_CPU_SUBTYPE_SH7786) |
| 272 | static inline int scif_txroom(struct uart_port *port) | 296 | static int scif_txfill(struct uart_port *port) |
| 273 | { | 297 | { |
| 274 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); | 298 | return sci_in(port, SCTFDR) & 0xff; |
| 275 | } | 299 | } |
| 276 | 300 | ||
| 277 | static inline int scif_rxroom(struct uart_port *port) | 301 | static int scif_txroom(struct uart_port *port) |
| 302 | { | ||
| 303 | return SCIF_TXROOM_MAX - scif_txfill(port); | ||
| 304 | } | ||
| 305 | |||
| 306 | static int scif_rxfill(struct uart_port *port) | ||
| 278 | { | 307 | { |
| 279 | return sci_in(port, SCRFDR) & 0xff; | 308 | return sci_in(port, SCRFDR) & 0xff; |
| 280 | } | 309 | } |
| 281 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | 310 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) |
| 282 | static inline int scif_txroom(struct uart_port *port) | 311 | static int scif_txfill(struct uart_port *port) |
| 283 | { | 312 | { |
| 284 | if ((port->mapbase == 0xffe00000) || | 313 | if (port->mapbase == 0xffe00000 || |
| 285 | (port->mapbase == 0xffe08000)) { | 314 | port->mapbase == 0xffe08000) |
| 286 | /* SCIF0/1*/ | 315 | /* SCIF0/1*/ |
| 287 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); | 316 | return sci_in(port, SCTFDR) & 0xff; |
| 288 | } else { | 317 | else |
| 289 | /* SCIF2 */ | 318 | /* SCIF2 */ |
| 290 | return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); | 319 | return sci_in(port, SCFDR) >> 8; |
| 291 | } | ||
| 292 | } | 320 | } |
| 293 | 321 | ||
| 294 | static inline int scif_rxroom(struct uart_port *port) | 322 | static int scif_txroom(struct uart_port *port) |
| 323 | { | ||
| 324 | if (port->mapbase == 0xffe00000 || | ||
| 325 | port->mapbase == 0xffe08000) | ||
| 326 | /* SCIF0/1*/ | ||
| 327 | return SCIF_TXROOM_MAX - scif_txfill(port); | ||
| 328 | else | ||
| 329 | /* SCIF2 */ | ||
| 330 | return SCIF2_TXROOM_MAX - scif_txfill(port); | ||
| 331 | } | ||
| 332 | |||
| 333 | static int scif_rxfill(struct uart_port *port) | ||
| 295 | { | 334 | { |
| 296 | if ((port->mapbase == 0xffe00000) || | 335 | if ((port->mapbase == 0xffe00000) || |
| 297 | (port->mapbase == 0xffe08000)) { | 336 | (port->mapbase == 0xffe08000)) { |
| @@ -303,23 +342,33 @@ static inline int scif_rxroom(struct uart_port *port) | |||
| 303 | } | 342 | } |
| 304 | } | 343 | } |
| 305 | #else | 344 | #else |
| 306 | static inline int scif_txroom(struct uart_port *port) | 345 | static int scif_txfill(struct uart_port *port) |
| 346 | { | ||
| 347 | return sci_in(port, SCFDR) >> 8; | ||
| 348 | } | ||
| 349 | |||
| 350 | static int scif_txroom(struct uart_port *port) | ||
| 307 | { | 351 | { |
| 308 | return SCIF_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); | 352 | return SCIF_TXROOM_MAX - scif_txfill(port); |
| 309 | } | 353 | } |
| 310 | 354 | ||
| 311 | static inline int scif_rxroom(struct uart_port *port) | 355 | static int scif_rxfill(struct uart_port *port) |
| 312 | { | 356 | { |
| 313 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; | 357 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; |
| 314 | } | 358 | } |
| 315 | #endif | 359 | #endif |
| 316 | 360 | ||
| 317 | static inline int sci_txroom(struct uart_port *port) | 361 | static int sci_txfill(struct uart_port *port) |
| 318 | { | 362 | { |
| 319 | return (sci_in(port, SCxSR) & SCI_TDRE) != 0; | 363 | return !(sci_in(port, SCxSR) & SCI_TDRE); |
| 320 | } | 364 | } |
| 321 | 365 | ||
| 322 | static inline int sci_rxroom(struct uart_port *port) | 366 | static int sci_txroom(struct uart_port *port) |
| 367 | { | ||
| 368 | return !sci_txfill(port); | ||
| 369 | } | ||
| 370 | |||
| 371 | static int sci_rxfill(struct uart_port *port) | ||
| 323 | { | 372 | { |
| 324 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; | 373 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; |
| 325 | } | 374 | } |
| @@ -406,9 +455,9 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
| 406 | 455 | ||
| 407 | while (1) { | 456 | while (1) { |
| 408 | if (port->type == PORT_SCI) | 457 | if (port->type == PORT_SCI) |
| 409 | count = sci_rxroom(port); | 458 | count = sci_rxfill(port); |
| 410 | else | 459 | else |
| 411 | count = scif_rxroom(port); | 460 | count = scif_rxfill(port); |
| 412 | 461 | ||
| 413 | /* Don't copy more bytes than there is room for in the buffer */ | 462 | /* Don't copy more bytes than there is room for in the buffer */ |
| 414 | count = tty_buffer_request_room(tty, count); | 463 | count = tty_buffer_request_room(tty, count); |
| @@ -453,10 +502,10 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
| 453 | } | 502 | } |
| 454 | 503 | ||
| 455 | /* Store data and status */ | 504 | /* Store data and status */ |
| 456 | if (status&SCxSR_FER(port)) { | 505 | if (status & SCxSR_FER(port)) { |
| 457 | flag = TTY_FRAME; | 506 | flag = TTY_FRAME; |
| 458 | dev_notice(port->dev, "frame error\n"); | 507 | dev_notice(port->dev, "frame error\n"); |
| 459 | } else if (status&SCxSR_PER(port)) { | 508 | } else if (status & SCxSR_PER(port)) { |
| 460 | flag = TTY_PARITY; | 509 | flag = TTY_PARITY; |
| 461 | dev_notice(port->dev, "parity error\n"); | 510 | dev_notice(port->dev, "parity error\n"); |
| 462 | } else | 511 | } else |
| @@ -618,13 +667,39 @@ static inline int sci_handle_breaks(struct uart_port *port) | |||
| 618 | return copied; | 667 | return copied; |
| 619 | } | 668 | } |
| 620 | 669 | ||
| 621 | static irqreturn_t sci_rx_interrupt(int irq, void *port) | 670 | static irqreturn_t sci_rx_interrupt(int irq, void *ptr) |
| 622 | { | 671 | { |
| 672 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 673 | struct uart_port *port = ptr; | ||
| 674 | struct sci_port *s = to_sci_port(port); | ||
| 675 | |||
| 676 | if (s->chan_rx) { | ||
| 677 | unsigned long tout; | ||
| 678 | u16 scr = sci_in(port, SCSCR); | ||
| 679 | u16 ssr = sci_in(port, SCxSR); | ||
| 680 | |||
| 681 | /* Disable future Rx interrupts */ | ||
| 682 | sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE); | ||
| 683 | /* Clear current interrupt */ | ||
| 684 | sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); | ||
| 685 | /* Calculate delay for 1.5 DMA buffers */ | ||
| 686 | tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / | ||
| 687 | port->fifosize / 2; | ||
| 688 | dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n", | ||
| 689 | tout * 1000 / HZ); | ||
| 690 | if (tout < 2) | ||
| 691 | tout = 2; | ||
| 692 | mod_timer(&s->rx_timer, jiffies + tout); | ||
| 693 | |||
| 694 | return IRQ_HANDLED; | ||
| 695 | } | ||
| 696 | #endif | ||
| 697 | |||
| 623 | /* I think sci_receive_chars has to be called irrespective | 698 | /* I think sci_receive_chars has to be called irrespective |
| 624 | * of whether the I_IXOFF is set, otherwise, how is the interrupt | 699 | * of whether the I_IXOFF is set, otherwise, how is the interrupt |
| 625 | * to be disabled? | 700 | * to be disabled? |
| 626 | */ | 701 | */ |
| 627 | sci_receive_chars(port); | 702 | sci_receive_chars(ptr); |
| 628 | 703 | ||
| 629 | return IRQ_HANDLED; | 704 | return IRQ_HANDLED; |
| 630 | } | 705 | } |
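The timer arithmetic in the DMA branch of sci_rx_interrupt() above is easier to follow as a derivation. Assuming port->timeout is set by the serial core's uart_update_timeout(), it is roughly HZ/50 plus the time to receive port->fifosize characters at the current baud rate, so

	tout = (port->timeout - HZ / 50)	/* time for fifosize characters */
		* s->buf_len_rx / port->fifosize	/* scale to one DMA buffer */
		* 3 / 2;				/* wait for 1.5 buffers */

i.e. the timer fires when one and a half Rx DMA buffers' worth of characters could have arrived, matching the "delay for 1.5 DMA buffers" comment, with a floor of two jiffies.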
| @@ -680,6 +755,7 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
| 680 | { | 755 | { |
| 681 | unsigned short ssr_status, scr_status, err_enabled; | 756 | unsigned short ssr_status, scr_status, err_enabled; |
| 682 | struct uart_port *port = ptr; | 757 | struct uart_port *port = ptr; |
| 758 | struct sci_port *s = to_sci_port(port); | ||
| 683 | irqreturn_t ret = IRQ_NONE; | 759 | irqreturn_t ret = IRQ_NONE; |
| 684 | 760 | ||
| 685 | ssr_status = sci_in(port, SCxSR); | 761 | ssr_status = sci_in(port, SCxSR); |
| @@ -687,10 +763,15 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
| 687 | err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); | 763 | err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); |
| 688 | 764 | ||
| 689 | /* Tx Interrupt */ | 765 | /* Tx Interrupt */ |
| 690 | if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE)) | 766 | if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) && |
| 767 | !s->chan_tx) | ||
| 691 | ret = sci_tx_interrupt(irq, ptr); | 768 | ret = sci_tx_interrupt(irq, ptr); |
| 692 | /* Rx Interrupt */ | 769 | /* |
| 693 | if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE)) | 770 | * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / |
| 771 | * DR flags | ||
| 772 | */ | ||
| 773 | if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && | ||
| 774 | (scr_status & SCI_CTRL_FLAGS_RIE)) | ||
| 694 | ret = sci_rx_interrupt(irq, ptr); | 775 | ret = sci_rx_interrupt(irq, ptr); |
| 695 | /* Error Interrupt */ | 776 | /* Error Interrupt */ |
| 696 | if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) | 777 | if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) |
| @@ -699,6 +780,10 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
| 699 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) | 780 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) |
| 700 | ret = sci_br_interrupt(irq, ptr); | 781 | ret = sci_br_interrupt(irq, ptr); |
| 701 | 782 | ||
| 783 | WARN_ONCE(ret == IRQ_NONE, | ||
| 784 | "%s: %d IRQ %d, status %x, control %x\n", __func__, | ||
| 785 | irq, port->line, ssr_status, scr_status); | ||
| 786 | |||
| 702 | return ret; | 787 | return ret; |
| 703 | } | 788 | } |
| 704 | 789 | ||
| @@ -800,7 +885,9 @@ static void sci_free_irq(struct sci_port *port) | |||
| 800 | static unsigned int sci_tx_empty(struct uart_port *port) | 885 | static unsigned int sci_tx_empty(struct uart_port *port) |
| 801 | { | 886 | { |
| 802 | unsigned short status = sci_in(port, SCxSR); | 887 | unsigned short status = sci_in(port, SCxSR); |
| 803 | return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0; | 888 | unsigned short in_tx_fifo = scif_txfill(port); |
| 889 | |||
| 890 | return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; | ||
| 804 | } | 891 | } |
| 805 | 892 | ||
| 806 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) | 893 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) |
| @@ -812,16 +899,297 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
| 812 | 899 | ||
| 813 | static unsigned int sci_get_mctrl(struct uart_port *port) | 900 | static unsigned int sci_get_mctrl(struct uart_port *port) |
| 814 | { | 901 | { |
| 815 | /* This routine is used for geting signals of: DTR, DCD, DSR, RI, | 902 | /* This routine is used for getting signals of: DTR, DCD, DSR, RI, |
| 816 | and CTS/RTS */ | 903 | and CTS/RTS */ |
| 817 | 904 | ||
| 818 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; | 905 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; |
| 819 | } | 906 | } |
| 820 | 907 | ||
| 908 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 909 | static void sci_dma_tx_complete(void *arg) | ||
| 910 | { | ||
| 911 | struct sci_port *s = arg; | ||
| 912 | struct uart_port *port = &s->port; | ||
| 913 | struct circ_buf *xmit = &port->state->xmit; | ||
| 914 | unsigned long flags; | ||
| 915 | |||
| 916 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 917 | |||
| 918 | spin_lock_irqsave(&port->lock, flags); | ||
| 919 | |||
| 920 | xmit->tail += s->sg_tx.length; | ||
| 921 | xmit->tail &= UART_XMIT_SIZE - 1; | ||
| 922 | |||
| 923 | port->icount.tx += s->sg_tx.length; | ||
| 924 | |||
| 925 | async_tx_ack(s->desc_tx); | ||
| 926 | s->cookie_tx = -EINVAL; | ||
| 927 | s->desc_tx = NULL; | ||
| 928 | |||
| 929 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 930 | |||
| 931 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
| 932 | uart_write_wakeup(port); | ||
| 933 | |||
| 934 | if (uart_circ_chars_pending(xmit)) | ||
| 935 | schedule_work(&s->work_tx); | ||
| 936 | } | ||
| 937 | |||
| 938 | /* Locking: called with port lock held */ | ||
| 939 | static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty, | ||
| 940 | size_t count) | ||
| 941 | { | ||
| 942 | struct uart_port *port = &s->port; | ||
| 943 | int i, active, room; | ||
| 944 | |||
| 945 | room = tty_buffer_request_room(tty, count); | ||
| 946 | |||
| 947 | if (s->active_rx == s->cookie_rx[0]) { | ||
| 948 | active = 0; | ||
| 949 | } else if (s->active_rx == s->cookie_rx[1]) { | ||
| 950 | active = 1; | ||
| 951 | } else { | ||
| 952 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); | ||
| 953 | return 0; | ||
| 954 | } | ||
| 955 | |||
| 956 | if (room < count) | ||
| 957 | dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", | ||
| 958 | count - room); | ||
| 959 | if (!room) | ||
| 960 | return room; | ||
| 961 | |||
| 962 | for (i = 0; i < room; i++) | ||
| 963 | tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i], | ||
| 964 | TTY_NORMAL); | ||
| 965 | |||
| 966 | port->icount.rx += room; | ||
| 967 | |||
| 968 | return room; | ||
| 969 | } | ||
| 970 | |||
| 971 | static void sci_dma_rx_complete(void *arg) | ||
| 972 | { | ||
| 973 | struct sci_port *s = arg; | ||
| 974 | struct uart_port *port = &s->port; | ||
| 975 | struct tty_struct *tty = port->state->port.tty; | ||
| 976 | unsigned long flags; | ||
| 977 | int count; | ||
| 978 | |||
| 979 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 980 | |||
| 981 | spin_lock_irqsave(&port->lock, flags); | ||
| 982 | |||
| 983 | count = sci_dma_rx_push(s, tty, s->buf_len_rx); | ||
| 984 | |||
| 985 | mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5)); | ||
| 986 | |||
| 987 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 988 | |||
| 989 | if (count) | ||
| 990 | tty_flip_buffer_push(tty); | ||
| 991 | |||
| 992 | schedule_work(&s->work_rx); | ||
| 993 | } | ||
| 994 | |||
| 995 | static void sci_start_rx(struct uart_port *port); | ||
| 996 | static void sci_start_tx(struct uart_port *port); | ||
| 997 | |||
| 998 | static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) | ||
| 999 | { | ||
| 1000 | struct dma_chan *chan = s->chan_rx; | ||
| 1001 | struct uart_port *port = &s->port; | ||
| 1002 | |||
| 1003 | s->chan_rx = NULL; | ||
| 1004 | s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; | ||
| 1005 | dma_release_channel(chan); | ||
| 1006 | dma_free_coherent(port->dev, s->buf_len_rx * 2, | ||
| 1007 | sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); | ||
| 1008 | if (enable_pio) | ||
| 1009 | sci_start_rx(port); | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) | ||
| 1013 | { | ||
| 1014 | struct dma_chan *chan = s->chan_tx; | ||
| 1015 | struct uart_port *port = &s->port; | ||
| 1016 | |||
| 1017 | s->chan_tx = NULL; | ||
| 1018 | s->cookie_tx = -EINVAL; | ||
| 1019 | dma_release_channel(chan); | ||
| 1020 | if (enable_pio) | ||
| 1021 | sci_start_tx(port); | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | static void sci_submit_rx(struct sci_port *s) | ||
| 1025 | { | ||
| 1026 | struct dma_chan *chan = s->chan_rx; | ||
| 1027 | int i; | ||
| 1028 | |||
| 1029 | for (i = 0; i < 2; i++) { | ||
| 1030 | struct scatterlist *sg = &s->sg_rx[i]; | ||
| 1031 | struct dma_async_tx_descriptor *desc; | ||
| 1032 | |||
| 1033 | desc = chan->device->device_prep_slave_sg(chan, | ||
| 1034 | sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); | ||
| 1035 | |||
| 1036 | if (desc) { | ||
| 1037 | s->desc_rx[i] = desc; | ||
| 1038 | desc->callback = sci_dma_rx_complete; | ||
| 1039 | desc->callback_param = s; | ||
| 1040 | s->cookie_rx[i] = desc->tx_submit(desc); | ||
| 1041 | } | ||
| 1042 | |||
| 1043 | if (!desc || s->cookie_rx[i] < 0) { | ||
| 1044 | if (i) { | ||
| 1045 | async_tx_ack(s->desc_rx[0]); | ||
| 1046 | s->cookie_rx[0] = -EINVAL; | ||
| 1047 | } | ||
| 1048 | if (desc) { | ||
| 1049 | async_tx_ack(desc); | ||
| 1050 | s->cookie_rx[i] = -EINVAL; | ||
| 1051 | } | ||
| 1052 | dev_warn(s->port.dev, | ||
| 1053 | "failed to re-start DMA, using PIO\n"); | ||
| 1054 | sci_rx_dma_release(s, true); | ||
| 1055 | return; | ||
| 1056 | } | ||
| 1057 | } | ||
| 1058 | |||
| 1059 | s->active_rx = s->cookie_rx[0]; | ||
| 1060 | |||
| 1061 | dma_async_issue_pending(chan); | ||
| 1062 | } | ||
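sci_submit_rx() follows the canonical dmaengine slave sequence of this era: prepare a descriptor through chan->device->device_prep_slave_sg(), attach the callback, submit, then kick the engine. Condensed to its skeleton below; a sketch assuming a DMA_SLAVE-capable channel and an already-mapped scatterlist, with the error paths elided:

    struct dma_async_tx_descriptor *desc;
    dma_cookie_t cookie;

    /* 1. build a descriptor covering one scatterlist entry */
    desc = chan->device->device_prep_slave_sg(chan, sg, 1,
    		DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

    /* 2. attach the completion callback before submitting */
    desc->callback = sci_dma_rx_complete;
    desc->callback_param = s;

    /* 3. queue it; a negative cookie signals failure */
    cookie = desc->tx_submit(desc);

    /* 4. tell the engine to start processing queued descriptors */
    dma_async_issue_pending(chan);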
| 1063 | |||
| 1064 | static void work_fn_rx(struct work_struct *work) | ||
| 1065 | { | ||
| 1066 | struct sci_port *s = container_of(work, struct sci_port, work_rx); | ||
| 1067 | struct uart_port *port = &s->port; | ||
| 1068 | struct dma_async_tx_descriptor *desc; | ||
| 1069 | int new; | ||
| 1070 | |||
| 1071 | if (s->active_rx == s->cookie_rx[0]) { | ||
| 1072 | new = 0; | ||
| 1073 | } else if (s->active_rx == s->cookie_rx[1]) { | ||
| 1074 | new = 1; | ||
| 1075 | } else { | ||
| 1076 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); | ||
| 1077 | return; | ||
| 1078 | } | ||
| 1079 | desc = s->desc_rx[new]; | ||
| 1080 | |||
| 1081 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != | ||
| 1082 | DMA_SUCCESS) { | ||
| 1083 | /* Handle incomplete DMA receive */ | ||
| 1084 | struct tty_struct *tty = port->state->port.tty; | ||
| 1085 | struct dma_chan *chan = s->chan_rx; | ||
| 1086 | struct sh_desc *sh_desc = container_of(desc, struct sh_desc, | ||
| 1087 | async_tx); | ||
| 1088 | unsigned long flags; | ||
| 1089 | int count; | ||
| 1090 | |||
| 1091 | chan->device->device_terminate_all(chan); | ||
| 1092 | dev_dbg(port->dev, "Read %u bytes with cookie %d\n", | ||
| 1093 | sh_desc->partial, sh_desc->cookie); | ||
| 1094 | |||
| 1095 | spin_lock_irqsave(&port->lock, flags); | ||
| 1096 | count = sci_dma_rx_push(s, tty, sh_desc->partial); | ||
| 1097 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 1098 | |||
| 1099 | if (count) | ||
| 1100 | tty_flip_buffer_push(tty); | ||
| 1101 | |||
| 1102 | sci_submit_rx(s); | ||
| 1103 | |||
| 1104 | return; | ||
| 1105 | } | ||
| 1106 | |||
| 1107 | s->cookie_rx[new] = desc->tx_submit(desc); | ||
| 1108 | if (s->cookie_rx[new] < 0) { | ||
| 1109 | dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); | ||
| 1110 | sci_rx_dma_release(s, true); | ||
| 1111 | return; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__, | ||
| 1115 | s->cookie_rx[new], new); | ||
| 1116 | |||
| 1117 | s->active_rx = s->cookie_rx[!new]; | ||
| 1118 | } | ||
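The final assignment flips the active buffer: having pushed and resubmitted buffer 'new', the engine is now necessarily filling the other one. A trace of the ping-pong (ours), assuming the first two submissions returned cookies 1 and 2:

    /*
     * active = 1                      buffer 0 is filling
     * buffer 0 completes              push data, resubmit (cookie 3)
     * active = cookie_rx[!0] = 2      buffer 1 is filling
     * buffer 1 completes              push data, resubmit (cookie 4)
     * active = cookie_rx[!1] = 3      buffer 0 is filling again
     */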
| 1119 | |||
| 1120 | static void work_fn_tx(struct work_struct *work) | ||
| 1121 | { | ||
| 1122 | struct sci_port *s = container_of(work, struct sci_port, work_tx); | ||
| 1123 | struct dma_async_tx_descriptor *desc; | ||
| 1124 | struct dma_chan *chan = s->chan_tx; | ||
| 1125 | struct uart_port *port = &s->port; | ||
| 1126 | struct circ_buf *xmit = &port->state->xmit; | ||
| 1127 | struct scatterlist *sg = &s->sg_tx; | ||
| 1128 | |||
| 1129 | /* | ||
| 1130 | * DMA is idle now. | ||
| 1131 | * The port xmit buffer is already mapped and is exactly one page, so just | ||
| 1132 | * adjust the offset and length. Since it is a circular buffer, we must | ||
| 1133 | * transmit up to the end of the buffer first, then the remainder. Take | ||
| 1134 | * the port lock to get a consistent view of the xmit buffer. | ||
| 1135 | */ | ||
| 1136 | spin_lock_irq(&port->lock); | ||
| 1137 | sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); | ||
| 1138 | sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + | ||
| 1139 | sg->offset; | ||
| 1140 | sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), | ||
| 1141 | CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); | ||
| 1142 | sg->dma_length = sg->length; | ||
| 1143 | spin_unlock_irq(&port->lock); | ||
| 1144 | |||
| 1145 | BUG_ON(!sg->length); | ||
| 1146 | |||
| 1147 | desc = chan->device->device_prep_slave_sg(chan, | ||
| 1148 | sg, s->sg_len_tx, DMA_TO_DEVICE, | ||
| 1149 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 1150 | if (!desc) { | ||
| 1151 | /* switch to PIO */ | ||
| 1152 | sci_tx_dma_release(s, true); | ||
| 1153 | return; | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); | ||
| 1157 | |||
| 1158 | spin_lock_irq(&port->lock); | ||
| 1159 | s->desc_tx = desc; | ||
| 1160 | desc->callback = sci_dma_tx_complete; | ||
| 1161 | desc->callback_param = s; | ||
| 1162 | spin_unlock_irq(&port->lock); | ||
| 1163 | s->cookie_tx = desc->tx_submit(desc); | ||
| 1164 | if (s->cookie_tx < 0) { | ||
| 1165 | dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); | ||
| 1166 | /* switch to PIO */ | ||
| 1167 | sci_tx_dma_release(s, true); | ||
| 1168 | return; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, | ||
| 1172 | xmit->buf, xmit->tail, xmit->head, s->cookie_tx); | ||
| 1173 | |||
| 1174 | dma_async_issue_pending(chan); | ||
| 1175 | } | ||
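The sg->length computed above is min(total pending bytes, contiguous bytes before the wrap), so each descriptor covers a single linear run; any remainder goes out on the next pass through work_fn_tx(). A standalone sketch of the same arithmetic, with the CIRC_CNT()/CIRC_CNT_TO_END() formulas from <linux/circ_buf.h> written out as functions and a 16-byte buffer for readability:

    #include <assert.h>

    /* 'size' must be a power of two, as with the kernel macros */
    static int circ_cnt(int head, int tail, int size)
    {
    	return (head - tail) & (size - 1);
    }

    static int circ_cnt_to_end(int head, int tail, int size)
    {
    	int end = size - tail;
    	int n = (head + end) & (size - 1);
    	return n < end ? n : end;
    }

    int main(void)
    {
    	/* 8 bytes pending, but only 4 sit before the wrap point: the
    	 * first DMA transfer sends those 4, a later one sends the rest */
    	assert(circ_cnt(4, 12, 16) == 8);
    	assert(circ_cnt_to_end(4, 12, 16) == 4);
    	return 0;
    }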
| 1176 | #endif | ||
| 1177 | |||
| 821 | static void sci_start_tx(struct uart_port *port) | 1178 | static void sci_start_tx(struct uart_port *port) |
| 822 | { | 1179 | { |
| 823 | unsigned short ctrl; | 1180 | unsigned short ctrl; |
| 824 | 1181 | ||
| 1182 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1183 | struct sci_port *s = to_sci_port(port); | ||
| 1184 | |||
| 1185 | if (s->chan_tx) { | ||
| 1186 | if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) | ||
| 1187 | schedule_work(&s->work_tx); | ||
| 1188 | |||
| 1189 | return; | ||
| 1190 | } | ||
| 1191 | #endif | ||
| 1192 | |||
| 825 | /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ | 1193 | /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ |
| 826 | ctrl = sci_in(port, SCSCR); | 1194 | ctrl = sci_in(port, SCSCR); |
| 827 | ctrl |= SCI_CTRL_FLAGS_TIE; | 1195 | ctrl |= SCI_CTRL_FLAGS_TIE; |
| @@ -838,13 +1206,12 @@ static void sci_stop_tx(struct uart_port *port) | |||
| 838 | sci_out(port, SCSCR, ctrl); | 1206 | sci_out(port, SCSCR, ctrl); |
| 839 | } | 1207 | } |
| 840 | 1208 | ||
| 841 | static void sci_start_rx(struct uart_port *port, unsigned int tty_start) | 1209 | static void sci_start_rx(struct uart_port *port) |
| 842 | { | 1210 | { |
| 843 | unsigned short ctrl; | 1211 | unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; |
| 844 | 1212 | ||
| 845 | /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ | 1213 | /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ |
| 846 | ctrl = sci_in(port, SCSCR); | 1214 | ctrl |= sci_in(port, SCSCR); |
| 847 | ctrl |= SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; | ||
| 848 | sci_out(port, SCSCR, ctrl); | 1215 | sci_out(port, SCSCR, ctrl); |
| 849 | } | 1216 | } |
| 850 | 1217 | ||
| @@ -868,16 +1235,154 @@ static void sci_break_ctl(struct uart_port *port, int break_state) | |||
| 868 | /* Nothing here yet .. */ | 1235 | /* Nothing here yet .. */ |
| 869 | } | 1236 | } |
| 870 | 1237 | ||
| 1238 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1239 | static bool filter(struct dma_chan *chan, void *slave) | ||
| 1240 | { | ||
| 1241 | struct sh_dmae_slave *param = slave; | ||
| 1242 | |||
| 1243 | dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, | ||
| 1244 | param->slave_id); | ||
| 1245 | |||
| 1246 | if (param->dma_dev == chan->device->dev) { | ||
| 1247 | chan->private = param; | ||
| 1248 | return true; | ||
| 1249 | } else { | ||
| 1250 | return false; | ||
| 1251 | } | ||
| 1252 | } | ||
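This is the standard dmaengine filter convention of the period: dma_request_channel() offers each free channel to the filter, and returning true claims the channel. One lifetime detail worth noting, as a comment of ours:

    /* 'slave' ends up in chan->private, so it must outlive the
     * dma_request_channel() call; that is why sci_request_dma() below
     * uses the param_tx/param_rx members embedded in struct sci_port
     * rather than stack storage. */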
| 1253 | |||
| 1254 | static void rx_timer_fn(unsigned long arg) | ||
| 1255 | { | ||
| 1256 | struct sci_port *s = (struct sci_port *)arg; | ||
| 1257 | struct uart_port *port = &s->port; | ||
| 1258 | |||
| 1259 | u16 scr = sci_in(port, SCSCR); | ||
| 1260 | sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); | ||
| 1261 | dev_dbg(port->dev, "DMA Rx timed out\n"); | ||
| 1262 | schedule_work(&s->work_rx); | ||
| 1263 | } | ||
| 1264 | |||
| 1265 | static void sci_request_dma(struct uart_port *port) | ||
| 1266 | { | ||
| 1267 | struct sci_port *s = to_sci_port(port); | ||
| 1268 | struct sh_dmae_slave *param; | ||
| 1269 | struct dma_chan *chan; | ||
| 1270 | dma_cap_mask_t mask; | ||
| 1271 | int nent; | ||
| 1272 | |||
| 1273 | dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, | ||
| 1274 | port->line, s->dma_dev); | ||
| 1275 | |||
| 1276 | if (!s->dma_dev) | ||
| 1277 | return; | ||
| 1278 | |||
| 1279 | dma_cap_zero(mask); | ||
| 1280 | dma_cap_set(DMA_SLAVE, mask); | ||
| 1281 | |||
| 1282 | param = &s->param_tx; | ||
| 1283 | |||
| 1284 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ | ||
| 1285 | param->slave_id = s->slave_tx; | ||
| 1286 | param->dma_dev = s->dma_dev; | ||
| 1287 | |||
| 1288 | s->cookie_tx = -EINVAL; | ||
| 1289 | chan = dma_request_channel(mask, filter, param); | ||
| 1290 | dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); | ||
| 1291 | if (chan) { | ||
| 1292 | s->chan_tx = chan; | ||
| 1293 | sg_init_table(&s->sg_tx, 1); | ||
| 1294 | /* UART circular tx buffer is an aligned page. */ | ||
| 1295 | BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); | ||
| 1296 | sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), | ||
| 1297 | UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); | ||
| 1298 | nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); | ||
| 1299 | if (!nent) | ||
| 1300 | sci_tx_dma_release(s, false); | ||
| 1301 | else | ||
| 1302 | dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, | ||
| 1303 | sg_dma_len(&s->sg_tx), | ||
| 1304 | port->state->xmit.buf, sg_dma_address(&s->sg_tx)); | ||
| 1305 | |||
| 1306 | s->sg_len_tx = nent; | ||
| 1307 | |||
| 1308 | INIT_WORK(&s->work_tx, work_fn_tx); | ||
| 1309 | } | ||
| 1310 | |||
| 1311 | param = &s->param_rx; | ||
| 1312 | |||
| 1313 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ | ||
| 1314 | param->slave_id = s->slave_rx; | ||
| 1315 | param->dma_dev = s->dma_dev; | ||
| 1316 | |||
| 1317 | chan = dma_request_channel(mask, filter, param); | ||
| 1318 | dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); | ||
| 1319 | if (chan) { | ||
| 1320 | dma_addr_t dma[2]; | ||
| 1321 | void *buf[2]; | ||
| 1322 | int i; | ||
| 1323 | |||
| 1324 | s->chan_rx = chan; | ||
| 1325 | |||
| 1326 | s->buf_len_rx = 2 * max(16, (int)port->fifosize); | ||
| 1327 | buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, | ||
| 1328 | &dma[0], GFP_KERNEL); | ||
| 1329 | |||
| 1330 | if (!buf[0]) { | ||
| 1331 | dev_warn(port->dev, | ||
| 1332 | "failed to allocate dma buffer, using PIO\n"); | ||
| 1333 | sci_rx_dma_release(s, true); | ||
| 1334 | return; | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | buf[1] = buf[0] + s->buf_len_rx; | ||
| 1338 | dma[1] = dma[0] + s->buf_len_rx; | ||
| 1339 | |||
| 1340 | for (i = 0; i < 2; i++) { | ||
| 1341 | struct scatterlist *sg = &s->sg_rx[i]; | ||
| 1342 | |||
| 1343 | sg_init_table(sg, 1); | ||
| 1344 | sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, | ||
| 1345 | (int)buf[i] & ~PAGE_MASK); | ||
| 1346 | sg->dma_address = dma[i]; | ||
| 1347 | sg->dma_length = sg->length; | ||
| 1348 | } | ||
| 1349 | |||
| 1350 | INIT_WORK(&s->work_rx, work_fn_rx); | ||
| 1351 | setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); | ||
| 1352 | |||
| 1353 | sci_submit_rx(s); | ||
| 1354 | } | ||
| 1355 | } | ||
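One sizing detail worth spelling out: each Rx buffer holds twice the FIFO depth (with a floor of 16 bytes), and a single coherent allocation backs both halves, which is why sci_rx_dma_release() above frees buf_len_rx * 2 starting at sg_rx[0]. Worked numbers for the two FIFO depths set in sci_init_single() below:

    /*
     * SCIF  (fifosize 16): buf_len_rx = 2 * max(16, 16) = 32,  allocation  64
     * SCIFA (fifosize 64): buf_len_rx = 2 * max(16, 64) = 128, allocation 256
     */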
| 1356 | |||
| 1357 | static void sci_free_dma(struct uart_port *port) | ||
| 1358 | { | ||
| 1359 | struct sci_port *s = to_sci_port(port); | ||
| 1360 | |||
| 1361 | if (!s->dma_dev) | ||
| 1362 | return; | ||
| 1363 | |||
| 1364 | if (s->chan_tx) | ||
| 1365 | sci_tx_dma_release(s, false); | ||
| 1366 | if (s->chan_rx) | ||
| 1367 | sci_rx_dma_release(s, false); | ||
| 1368 | } | ||
| 1369 | #endif | ||
| 1370 | |||
| 871 | static int sci_startup(struct uart_port *port) | 1371 | static int sci_startup(struct uart_port *port) |
| 872 | { | 1372 | { |
| 873 | struct sci_port *s = to_sci_port(port); | 1373 | struct sci_port *s = to_sci_port(port); |
| 874 | 1374 | ||
| 1375 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 1376 | |||
| 875 | if (s->enable) | 1377 | if (s->enable) |
| 876 | s->enable(port); | 1378 | s->enable(port); |
| 877 | 1379 | ||
| 878 | sci_request_irq(s); | 1380 | sci_request_irq(s); |
| 1381 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1382 | sci_request_dma(port); | ||
| 1383 | #endif | ||
| 879 | sci_start_tx(port); | 1384 | sci_start_tx(port); |
| 880 | sci_start_rx(port, 1); | 1385 | sci_start_rx(port); |
| 881 | 1386 | ||
| 882 | return 0; | 1387 | return 0; |
| 883 | } | 1388 | } |
| @@ -886,8 +1391,13 @@ static void sci_shutdown(struct uart_port *port) | |||
| 886 | { | 1391 | { |
| 887 | struct sci_port *s = to_sci_port(port); | 1392 | struct sci_port *s = to_sci_port(port); |
| 888 | 1393 | ||
| 1394 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 1395 | |||
| 889 | sci_stop_rx(port); | 1396 | sci_stop_rx(port); |
| 890 | sci_stop_tx(port); | 1397 | sci_stop_tx(port); |
| 1398 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1399 | sci_free_dma(port); | ||
| 1400 | #endif | ||
| 891 | sci_free_irq(s); | 1401 | sci_free_irq(s); |
| 892 | 1402 | ||
| 893 | if (s->disable) | 1403 | if (s->disable) |
| @@ -937,6 +1447,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 937 | 1447 | ||
| 938 | sci_out(port, SCSMR, smr_val); | 1448 | sci_out(port, SCSMR, smr_val); |
| 939 | 1449 | ||
| 1450 | dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, | ||
| 1451 | SCSCR_INIT(port)); | ||
| 1452 | |||
| 940 | if (t > 0) { | 1453 | if (t > 0) { |
| 941 | if (t >= 256) { | 1454 | if (t >= 256) { |
| 942 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); | 1455 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); |
| @@ -954,7 +1467,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 954 | sci_out(port, SCSCR, SCSCR_INIT(port)); | 1467 | sci_out(port, SCSCR, SCSCR_INIT(port)); |
| 955 | 1468 | ||
| 956 | if ((termios->c_cflag & CREAD) != 0) | 1469 | if ((termios->c_cflag & CREAD) != 0) |
| 957 | sci_start_rx(port, 0); | 1470 | sci_start_rx(port); |
| 958 | } | 1471 | } |
| 959 | 1472 | ||
| 960 | static const char *sci_type(struct uart_port *port) | 1473 | static const char *sci_type(struct uart_port *port) |
| @@ -1049,19 +1562,21 @@ static void __devinit sci_init_single(struct platform_device *dev, | |||
| 1049 | unsigned int index, | 1562 | unsigned int index, |
| 1050 | struct plat_sci_port *p) | 1563 | struct plat_sci_port *p) |
| 1051 | { | 1564 | { |
| 1052 | sci_port->port.ops = &sci_uart_ops; | 1565 | struct uart_port *port = &sci_port->port; |
| 1053 | sci_port->port.iotype = UPIO_MEM; | 1566 | |
| 1054 | sci_port->port.line = index; | 1567 | port->ops = &sci_uart_ops; |
| 1568 | port->iotype = UPIO_MEM; | ||
| 1569 | port->line = index; | ||
| 1055 | 1570 | ||
| 1056 | switch (p->type) { | 1571 | switch (p->type) { |
| 1057 | case PORT_SCIFA: | 1572 | case PORT_SCIFA: |
| 1058 | sci_port->port.fifosize = 64; | 1573 | port->fifosize = 64; |
| 1059 | break; | 1574 | break; |
| 1060 | case PORT_SCIF: | 1575 | case PORT_SCIF: |
| 1061 | sci_port->port.fifosize = 16; | 1576 | port->fifosize = 16; |
| 1062 | break; | 1577 | break; |
| 1063 | default: | 1578 | default: |
| 1064 | sci_port->port.fifosize = 1; | 1579 | port->fifosize = 1; |
| 1065 | break; | 1580 | break; |
| 1066 | } | 1581 | } |
| 1067 | 1582 | ||
| @@ -1070,19 +1585,28 @@ static void __devinit sci_init_single(struct platform_device *dev, | |||
| 1070 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); | 1585 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); |
| 1071 | sci_port->enable = sci_clk_enable; | 1586 | sci_port->enable = sci_clk_enable; |
| 1072 | sci_port->disable = sci_clk_disable; | 1587 | sci_port->disable = sci_clk_disable; |
| 1073 | sci_port->port.dev = &dev->dev; | 1588 | port->dev = &dev->dev; |
| 1074 | } | 1589 | } |
| 1075 | 1590 | ||
| 1076 | sci_port->break_timer.data = (unsigned long)sci_port; | 1591 | sci_port->break_timer.data = (unsigned long)sci_port; |
| 1077 | sci_port->break_timer.function = sci_break_timer; | 1592 | sci_port->break_timer.function = sci_break_timer; |
| 1078 | init_timer(&sci_port->break_timer); | 1593 | init_timer(&sci_port->break_timer); |
| 1079 | 1594 | ||
| 1080 | sci_port->port.mapbase = p->mapbase; | 1595 | port->mapbase = p->mapbase; |
| 1081 | sci_port->port.membase = p->membase; | 1596 | port->membase = p->membase; |
| 1082 | 1597 | ||
| 1083 | sci_port->port.irq = p->irqs[SCIx_TXI_IRQ]; | 1598 | port->irq = p->irqs[SCIx_TXI_IRQ]; |
| 1084 | sci_port->port.flags = p->flags; | 1599 | port->flags = p->flags; |
| 1085 | sci_port->type = sci_port->port.type = p->type; | 1600 | sci_port->type = port->type = p->type; |
| 1601 | |||
| 1602 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1603 | sci_port->dma_dev = p->dma_dev; | ||
| 1604 | sci_port->slave_tx = p->dma_slave_tx; | ||
| 1605 | sci_port->slave_rx = p->dma_slave_rx; | ||
| 1606 | |||
| 1607 | dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__, | ||
| 1608 | p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); | ||
| 1609 | #endif | ||
| 1086 | 1610 | ||
| 1087 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); | 1611 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); |
| 1088 | } | 1612 | } |
