Diffstat (limited to 'drivers/char/synclink_gt.c')
-rw-r--r--  drivers/char/synclink_gt.c | 186
1 file changed, 105 insertions(+), 81 deletions(-)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 8678f0c8699d..4561ce2fba6d 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -468,7 +468,7 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
 static unsigned int tbuf_bytes(struct slgt_info *info);
 static void reset_tbufs(struct slgt_info *info);
 static void tdma_reset(struct slgt_info *info);
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
 
 static void get_signals(struct slgt_info *info);
 static void set_signals(struct slgt_info *info);
@@ -813,59 +813,32 @@ static int write(struct tty_struct *tty,
 	int ret = 0;
 	struct slgt_info *info = tty->driver_data;
 	unsigned long flags;
-	unsigned int bufs_needed;
 
 	if (sanity_check(info, tty->name, "write"))
-		goto cleanup;
+		return -EIO;
+
 	DBGINFO(("%s write count=%d\n", info->device_name, count));
 
-	if (!info->tx_buf)
-		goto cleanup;
+	if (!info->tx_buf || (count > info->max_frame_size))
+		return -EIO;
 
-	if (count > info->max_frame_size) {
-		ret = -EIO;
-		goto cleanup;
-	}
+	if (!count || tty->stopped || tty->hw_stopped)
+		return 0;
 
-	if (!count)
-		goto cleanup;
+	spin_lock_irqsave(&info->lock, flags);
 
-	if (!info->tx_active && info->tx_count) {
+	if (info->tx_count) {
 		/* send accumulated data from send_char() */
-		tx_load(info, info->tx_buf, info->tx_count);
-		goto start;
+		if (!tx_load(info, info->tx_buf, info->tx_count))
+			goto cleanup;
+		info->tx_count = 0;
 	}
-	bufs_needed = (count/DMABUFSIZE);
-	if (count % DMABUFSIZE)
-		++bufs_needed;
-	if (bufs_needed > free_tbuf_count(info))
-		goto cleanup;
 
-	ret = info->tx_count = count;
-	tx_load(info, buf, count);
-	goto start;
-
-start:
-	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
-		spin_lock_irqsave(&info->lock,flags);
-		if (!info->tx_active)
-			tx_start(info);
-		else if (!(rd_reg32(info, TDCSR) & BIT0)) {
-			/* transmit still active but transmit DMA stopped */
-			unsigned int i = info->tbuf_current;
-			if (!i)
-				i = info->tbuf_count;
-			i--;
-			/* if DMA buf unsent must try later after tx idle */
-			if (desc_count(info->tbufs[i]))
-				ret = 0;
-		}
-		if (ret > 0)
-			update_tx_timer(info);
-		spin_unlock_irqrestore(&info->lock,flags);
-	}
+	if (tx_load(info, buf, count))
+		ret = count;
 
 cleanup:
+	spin_unlock_irqrestore(&info->lock, flags);
 	DBGINFO(("%s write rc=%d\n", info->device_name, ret));
 	return ret;
 }
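
Note: the bufs_needed arithmetic dropped from write() above reappears inside tx_load() as DIV_ROUND_UP(size, DMABUFSIZE) in a later hunk, where the function now reports to the caller whether the data fit. A quick standalone check of that equivalence, assuming an illustrative value of 256 for DMABUFSIZE (plain userspace C, not driver code):

#include <assert.h>

/* same ceiling-division idiom as the kernel's DIV_ROUND_UP macro */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DMABUFSIZE		256	/* illustrative size, stands in for the driver's constant */

int main(void)
{
	unsigned int count;

	for (count = 1; count <= 4 * DMABUFSIZE; count++) {
		/* old write(): quotient, plus one more buffer for any remainder */
		unsigned int bufs_needed = count / DMABUFSIZE;

		if (count % DMABUFSIZE)
			++bufs_needed;

		/* new tx_load(): single expression, same result */
		assert(bufs_needed == DIV_ROUND_UP(count, DMABUFSIZE));
	}
	return 0;
}

Either way the result is the number of DMA descriptors a frame of count bytes occupies, which tx_load() compares against free_tbuf_count() before accepting the data.
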
@@ -882,7 +855,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
 	if (!info->tx_buf)
 		return 0;
 	spin_lock_irqsave(&info->lock,flags);
-	if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
+	if (info->tx_count < info->max_frame_size) {
 		info->tx_buf[info->tx_count++] = ch;
 		ret = 1;
 	}
@@ -981,10 +954,8 @@ static void flush_chars(struct tty_struct *tty)
 	DBGINFO(("%s flush_chars start transmit\n", info->device_name));
 
 	spin_lock_irqsave(&info->lock,flags);
-	if (!info->tx_active && info->tx_count) {
-		tx_load(info, info->tx_buf,info->tx_count);
-		tx_start(info);
-	}
+	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+		info->tx_count = 0;
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
@@ -997,10 +968,9 @@ static void flush_buffer(struct tty_struct *tty)
 		return;
 	DBGINFO(("%s flush_buffer\n", info->device_name));
 
-	spin_lock_irqsave(&info->lock,flags);
-	if (!info->tx_active)
-		info->tx_count = 0;
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	info->tx_count = 0;
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	tty_wakeup(tty);
 }
@@ -1033,12 +1003,10 @@ static void tx_release(struct tty_struct *tty)
 	if (sanity_check(info, tty->name, "tx_release"))
 		return;
 	DBGINFO(("%s tx_release\n", info->device_name));
-	spin_lock_irqsave(&info->lock,flags);
-	if (!info->tx_active && info->tx_count) {
-		tx_load(info, info->tx_buf, info->tx_count);
-		tx_start(info);
-	}
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+		info->tx_count = 0;
+	spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /*
@@ -1506,27 +1474,25 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
 
 	DBGINFO(("%s hdlc_xmit\n", dev->name));
 
+	if (!skb->len)
+		return NETDEV_TX_OK;
+
 	/* stop sending until this frame completes */
 	netif_stop_queue(dev);
 
-	/* copy data to device buffers */
-	info->tx_count = skb->len;
-	tx_load(info, skb->data, skb->len);
-
 	/* update network statistics */
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 
-	/* done with socket buffer, so free it */
-	dev_kfree_skb(skb);
-
 	/* save start time for transmit timeout detection */
 	dev->trans_start = jiffies;
 
-	spin_lock_irqsave(&info->lock,flags);
-	tx_start(info);
-	update_tx_timer(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	tx_load(info, skb->data, skb->len);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	/* done with socket buffer, so free it */
+	dev_kfree_skb(skb);
 
 	return NETDEV_TX_OK;
 }
@@ -2180,7 +2146,7 @@ static void isr_serial(struct slgt_info *info)
 
 	if (info->params.mode == MGSL_MODE_ASYNC) {
 		if (status & IRQ_TXIDLE) {
-			if (info->tx_count)
+			if (info->tx_active)
 				isr_txeom(info, status);
 		}
 		if (info->rx_pio && (status & IRQ_RXDATA))
@@ -2276,13 +2242,42 @@ static void isr_tdma(struct slgt_info *info)
 	}
 }
 
+/*
+ * return true if there are unsent tx DMA buffers, otherwise false
+ *
+ * if there are unsent buffers then info->tbuf_start
+ * is set to index of first unsent buffer
+ */
+static bool unsent_tbufs(struct slgt_info *info)
+{
+	unsigned int i = info->tbuf_current;
+	bool rc = false;
+
+	/*
+	 * search backwards from last loaded buffer (precedes tbuf_current)
+	 * for first unsent buffer (desc_count > 0)
+	 */
+
+	do {
+		if (i)
+			i--;
+		else
+			i = info->tbuf_count - 1;
+		if (!desc_count(info->tbufs[i]))
+			break;
+		info->tbuf_start = i;
+		rc = true;
+	} while (i != info->tbuf_current);
+
+	return rc;
+}
+
 static void isr_txeom(struct slgt_info *info, unsigned short status)
 {
 	DBGISR(("%s txeom status=%04x\n", info->device_name, status));
 
 	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
 	tdma_reset(info);
-	reset_tbufs(info);
 	if (status & IRQ_TXUNDER) {
 		unsigned short val = rd_reg16(info, TCR);
 		wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
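
The new unsent_tbufs() helper walks the transmit descriptor ring backwards from tbuf_current, recording the earliest descriptor that still holds unsent data so transmission can be restarted there. A minimal userspace model of the same walk, with a plain array standing in for the DMA descriptors and invented names (find_unsent, TBUF_COUNT) used purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define TBUF_COUNT 8

/* desc_count > 0 models a descriptor the DMA controller has not finished */
static unsigned int desc_count[TBUF_COUNT];

/*
 * Same search as unsent_tbufs(): step backwards (with wraparound) from the
 * next-free slot, remembering the earliest slot that still holds data.
 */
static bool find_unsent(unsigned int tbuf_current, unsigned int *tbuf_start)
{
	unsigned int i = tbuf_current;
	bool rc = false;

	do {
		i = i ? i - 1 : TBUF_COUNT - 1;
		if (!desc_count[i])
			break;
		*tbuf_start = i;
		rc = true;
	} while (i != tbuf_current);

	return rc;
}

int main(void)
{
	unsigned int start = 0;

	/* buffers 5 and 6 loaded but not yet sent; next free slot is 7 */
	desc_count[5] = 64;
	desc_count[6] = 32;

	if (find_unsent(7, &start))
		printf("restart DMA at buffer %u\n", start);	/* prints 5 */
	return 0;
}

The backward walk works because the descriptors loaded for a frame form one contiguous run ending just before tbuf_current; the first empty slot marks where that run begins.
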
@@ -2297,8 +2292,12 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
 			info->icount.txok++;
 	}
 
+	if (unsent_tbufs(info)) {
+		tx_start(info);
+		update_tx_timer(info);
+		return;
+	}
 	info->tx_active = false;
-	info->tx_count = 0;
 
 	del_timer(&info->tx_timer);
 
@@ -3949,7 +3948,7 @@ static void tx_start(struct slgt_info *info)
 		info->tx_enabled = true;
 	}
 
-	if (info->tx_count) {
+	if (desc_count(info->tbufs[info->tbuf_start])) {
 		info->drop_rts_on_tx_done = false;
 
 		if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4772,25 +4771,36 @@ static unsigned int tbuf_bytes(struct slgt_info *info)
 }
 
 /*
- * load transmit DMA buffer(s) with data
+ * load data into transmit DMA buffer ring and start transmitter if needed
+ * return true if data accepted, otherwise false (buffers full)
  */
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
 {
 	unsigned short count;
 	unsigned int i;
 	struct slgt_desc *d;
 
-	if (size == 0)
-		return;
+	/* check required buffer space */
+	if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
+		return false;
 
 	DBGDATA(info, buf, size, "tx");
 
+	/*
+	 * copy data to one or more DMA buffers in circular ring
+	 * tbuf_start = first buffer for this data
+	 * tbuf_current = next free buffer
+	 *
+	 * Copy all data before making data visible to DMA controller by
+	 * setting descriptor count of the first buffer.
+	 * This prevents an active DMA controller from reading the first DMA
+	 * buffers of a frame and stopping before the final buffers are filled.
+	 */
+
 	info->tbuf_start = i = info->tbuf_current;
 
 	while (size) {
 		d = &info->tbufs[i];
-		if (++i == info->tbuf_count)
-			i = 0;
 
 		count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
 		memcpy(d->buf, buf, count);
@@ -4808,11 +4818,27 @@ static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
 		else
 			set_desc_eof(*d, 0);
 
-		set_desc_count(*d, count);
+		/* set descriptor count for all but first buffer */
+		if (i != info->tbuf_start)
+			set_desc_count(*d, count);
 		d->buf_count = count;
+
+		if (++i == info->tbuf_count)
+			i = 0;
 	}
 
 	info->tbuf_current = i;
+
+	/* set first buffer count to make new data visible to DMA controller */
+	d = &info->tbufs[info->tbuf_start];
+	set_desc_count(*d, d->buf_count);
+
+	/* start transmitter if needed and update transmit timeout */
+	if (!info->tx_active)
+		tx_start(info);
+	update_tx_timer(info);
+
+	return true;
 }
 
 static int register_test(struct slgt_info *info)
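
tx_load() now defers setting the first descriptor's count until every buffer of the frame has been copied, so a DMA controller that is still running cannot pick up a half-loaded frame. The same publish-last idea in a self-contained sketch (plain C, toy ring, invented names load_frame/NBUF/BUFSZ; the memory-ordering and cache-coherency details that real DMA descriptors need are ignored here):

#include <string.h>

#define NBUF	4
#define BUFSZ	256		/* illustrative, not the driver's DMABUFSIZE */

struct desc {
	unsigned int count;	/* 0 = empty as far as the consumer is concerned */
	unsigned char buf[BUFSZ];
};

static struct desc ring[NBUF];

/*
 * Load one frame across several ring slots. Every slot after the first is
 * filled and given its count immediately; the first slot's count is written
 * only after the whole frame is in place, which is what "publishes" the
 * frame to a consumer that starts reading at 'start'.
 *
 * The caller must ensure the frame fits in the free slots, as tx_load()
 * does with its DIV_ROUND_UP() check against free_tbuf_count().
 */
static void load_frame(unsigned int start, const unsigned char *data, unsigned int size)
{
	unsigned int i = start, first_count = 0;

	while (size) {
		unsigned int chunk = size > BUFSZ ? BUFSZ : size;

		memcpy(ring[i].buf, data, chunk);
		if (i == start)
			first_count = chunk;	/* defer: keep the first slot invisible */
		else
			ring[i].count = chunk;

		data += chunk;
		size -= chunk;
		i = (i + 1) % NBUF;
	}

	ring[start].count = first_count;	/* publish the whole frame */
}

In the driver the ordering also relies on tx_load() being called under info->lock, which this sketch takes for granted.
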
@@ -4934,9 +4960,7 @@ static int loopback_test(struct slgt_info *info)
 	spin_lock_irqsave(&info->lock,flags);
 	async_mode(info);
 	rx_start(info);
-	info->tx_count = count;
 	tx_load(info, buf, count);
-	tx_start(info);
 	spin_unlock_irqrestore(&info->lock, flags);
 
 	/* wait for receive complete */