author	Al Viro <viro@ftp.linux.org.uk>	2008-01-13 09:17:05 -0500
committer	Jeff Garzik <jeff@garzik.org>	2008-01-18 14:44:32 -0500
commit	409cd63e6ef6a1aa05baa5bbff5521d62acd246d (patch)
tree	75cd418a040f6c45c084df72e48940f290b0cf30 /drivers/net/wan/dscc4.c
parent	5f490c9680561e31bf0003693f20e0c7333bbeff (diff)
dscc4 endian fixes
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/wan/dscc4.c')
-rw-r--r--	drivers/net/wan/dscc4.c	94
1 file changed, 49 insertions(+), 45 deletions(-)
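For readers skimming the diff below, the recurring pattern is the usual kernel one for descriptors shared with a little-endian device: fields the hardware reads or writes become __le32, values are converted with cpu_to_le32() when the CPU stores them and with le32_to_cpu() when the CPU reads them back, so the in-memory layout is fixed regardless of host endianness. The following minimal sketch is illustrative only and not part of the patch; struct example_txfd and the helper names are made up, loosely mirroring the driver's TxFD.

/* Illustrative sketch only -- not from dscc4.c.  Shows how __le32
 * descriptor fields are written and read on the CPU side. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_txfd {
	__le32 state;	/* stored little-endian, as the device expects */
	__le32 data;	/* little-endian bus address of the buffer */
};

static void example_fill(struct example_txfd *fd, u32 state, u32 bus_addr)
{
	/* convert CPU-order values once, at the point of writing */
	fd->state = cpu_to_le32(state);
	fd->data = cpu_to_le32(bus_addr);
}

static u32 example_read_state(const struct example_txfd *fd)
{
	/* convert back when the CPU inspects what the device wrote */
	return le32_to_cpu(fd->state);
}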
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 33dc713b5301..c6f26e28e376 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -139,19 +139,21 @@ struct thingie {
 };
 
 struct TxFD {
-	u32 state;
-	u32 next;
-	u32 data;
-	u32 complete;
+	__le32 state;
+	__le32 next;
+	__le32 data;
+	__le32 complete;
 	u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
+	/* FWIW, datasheet calls that "dummy" and says that card
+	 * never looks at it; neither does the driver */
 };
 
 struct RxFD {
-	u32 state1;
-	u32 next;
-	u32 data;
-	u32 state2;
-	u32 end;
+	__le32 state1;
+	__le32 next;
+	__le32 data;
+	__le32 state2;
+	__le32 end;
 };
 
 #define DUMMY_SKB_SIZE 64
@@ -181,7 +183,7 @@ struct RxFD {
 #define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
 
 struct dscc4_pci_priv {
-	u32 *iqcfg;
+	__le32 *iqcfg;
 	int cfg_cur;
 	spinlock_t lock;
 	struct pci_dev *pdev;
@@ -197,8 +199,8 @@ struct dscc4_dev_priv {
 
 	struct RxFD *rx_fd;
 	struct TxFD *tx_fd;
-	u32 *iqrx;
-	u32 *iqtx;
+	__le32 *iqrx;
+	__le32 *iqtx;
 
 	/* FIXME: check all the volatile are required */
 	volatile u32 tx_current;
@@ -298,7 +300,7 @@ struct dscc4_dev_priv {
 #define BrrExpMask 0x00000f00
 #define BrrMultMask 0x0000003f
 #define EncodingMask 0x00700000
-#define Hold 0x40000000
+#define Hold cpu_to_le32(0x40000000)
 #define SccBusy 0x10000000
 #define PowerUp 0x80000000
 #define Vis 0x00001000
@@ -307,14 +309,14 @@ struct dscc4_dev_priv {
 #define FrameRdo 0x40
 #define FrameCrc 0x20
 #define FrameRab 0x10
-#define FrameAborted 0x00000200
-#define FrameEnd 0x80000000
-#define DataComplete 0x40000000
+#define FrameAborted cpu_to_le32(0x00000200)
+#define FrameEnd cpu_to_le32(0x80000000)
+#define DataComplete cpu_to_le32(0x40000000)
 #define LengthCheck 0x00008000
 #define SccEvt 0x02000000
 #define NoAck 0x00000200
 #define Action 0x00000001
-#define HiDesc 0x20000000
+#define HiDesc cpu_to_le32(0x20000000)
 
 /* SCC events */
 #define RxEvt 0xf0000000
@@ -489,8 +491,8 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
 	skbuff = dpriv->tx_skbuff;
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		if (*skbuff) {
-			pci_unmap_single(pdev, tx_fd->data, (*skbuff)->len,
-				PCI_DMA_TODEVICE);
+			pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
+				(*skbuff)->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb(*skbuff);
 		}
 		skbuff++;
@@ -500,7 +502,7 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
 	skbuff = dpriv->rx_skbuff;
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		if (*skbuff) {
-			pci_unmap_single(pdev, rx_fd->data,
+			pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
 				RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(*skbuff);
 		}
@@ -522,10 +524,10 @@ static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
 	dpriv->rx_skbuff[dirty] = skb;
 	if (skb) {
 		skb->protocol = hdlc_type_trans(skb, dev);
-		rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
-					     len, PCI_DMA_FROMDEVICE);
+		rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
+					  skb->data, len, PCI_DMA_FROMDEVICE));
 	} else {
-		rx_fd->data = (u32) NULL;
+		rx_fd->data = 0;
 		ret = -1;
 	}
 	return ret;
@@ -587,7 +589,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
 
 	do {
 		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
-		    (dpriv->iqtx[cur] & Xpr))
+		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
 			break;
 		smp_rmb();
 		schedule_timeout_uninterruptible(10);
@@ -650,8 +652,9 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
 		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
 		goto refill;
 	}
-	pkt_len = TO_SIZE(rx_fd->state2);
-	pci_unmap_single(pdev, rx_fd->data, RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
+	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
+	pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
+			 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
 	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
 		stats->rx_packets++;
 		stats->rx_bytes += pkt_len;
@@ -679,7 +682,7 @@ refill:
 	}
 	dscc4_rx_update(dpriv, dev);
 	rx_fd->state2 = 0x00000000;
-	rx_fd->end = 0xbabeface;
+	rx_fd->end = cpu_to_le32(0xbabeface);
 }
 
 static void dscc4_free1(struct pci_dev *pdev)
@@ -772,8 +775,8 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
 	}
 	/* Global interrupt queue */
 	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
-	priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
-		IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
+	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
+		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
 	if (!priv->iqcfg)
 		goto err_free_irq_5;
 	writel(priv->iqcfg_dma, ioaddr + IQCFG);
@@ -786,7 +789,7 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
 	 */
 	for (i = 0; i < dev_per_card; i++) {
 		dpriv = priv->root + i;
-		dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
+		dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
 			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
 		if (!dpriv->iqtx)
 			goto err_free_iqtx_6;
@@ -794,7 +797,7 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
 	}
 	for (i = 0; i < dev_per_card; i++) {
 		dpriv = priv->root + i;
-		dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
+		dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
 			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
 		if (!dpriv->iqrx)
 			goto err_free_iqrx_7;
@@ -1156,8 +1159,8 @@ static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dpriv->tx_skbuff[next] = skb;
 	tx_fd = dpriv->tx_fd + next;
 	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
-	tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
-				     PCI_DMA_TODEVICE);
+	tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
+				     PCI_DMA_TODEVICE));
 	tx_fd->complete = 0x00000000;
 	tx_fd->jiffies = jiffies;
 	mb();
@@ -1508,7 +1511,7 @@ static irqreturn_t dscc4_irq(int irq, void *token)
 	if (state & Cfg) {
 		if (debug > 0)
 			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
-		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
+		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
 			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
 		if (!(state &= ~Cfg))
 			goto out;
@@ -1541,7 +1544,7 @@ static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
 
 try:
 	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
-	state = dpriv->iqtx[cur];
+	state = le32_to_cpu(dpriv->iqtx[cur]);
 	if (!state) {
 		if (debug > 4)
 			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
@@ -1580,7 +1583,7 @@ try:
 		tx_fd = dpriv->tx_fd + cur;
 		skb = dpriv->tx_skbuff[cur];
 		if (skb) {
-			pci_unmap_single(ppriv->pdev, tx_fd->data,
+			pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
 				skb->len, PCI_DMA_TODEVICE);
 			if (tx_fd->state & FrameEnd) {
 				stats->tx_packets++;
@@ -1711,7 +1714,7 @@ static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
 
 try:
 	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
-	state = dpriv->iqrx[cur];
+	state = le32_to_cpu(dpriv->iqrx[cur]);
 	if (!state)
 		return;
 	dpriv->iqrx[cur] = 0;
@@ -1755,7 +1758,7 @@ try:
 			goto try;
 		rx_fd->state1 &= ~Hold;
 		rx_fd->state2 = 0x00000000;
-		rx_fd->end = 0xbabeface;
+		rx_fd->end = cpu_to_le32(0xbabeface);
 		//}
 		goto try;
 	}
@@ -1834,7 +1837,7 @@ try:
 			hdlc_stats(dev)->rx_over_errors++;
 			rx_fd->state1 |= Hold;
 			rx_fd->state2 = 0x00000000;
-			rx_fd->end = 0xbabeface;
+			rx_fd->end = cpu_to_le32(0xbabeface);
 		} else
 			dscc4_rx_skb(dpriv, dev);
 	} while (1);
@@ -1904,8 +1907,9 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
 		skb_copy_to_linear_data(skb, version,
 					strlen(version) % DUMMY_SKB_SIZE);
 		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
-		tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
-					     DUMMY_SKB_SIZE, PCI_DMA_TODEVICE);
+		tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
+					     skb->data, DUMMY_SKB_SIZE,
+					     PCI_DMA_TODEVICE));
 		dpriv->tx_skbuff[last] = skb;
 	}
 	return skb;
@@ -1937,8 +1941,8 @@ static int dscc4_init_ring(struct net_device *dev)
 		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
 		tx_fd->complete = 0x00000000;
 		/* FIXME: NULL should be ok - to be tried */
-		tx_fd->data = dpriv->tx_fd_dma;
-		(tx_fd++)->next = (u32)(dpriv->tx_fd_dma +
+		tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
+		(tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
 					(++i%TX_RING_SIZE)*sizeof(*tx_fd));
 	} while (i < TX_RING_SIZE);
 
@@ -1951,12 +1955,12 @@ static int dscc4_init_ring(struct net_device *dev)
 		/* size set by the host. Multiple of 4 bytes please */
 		rx_fd->state1 = HiDesc;
 		rx_fd->state2 = 0x00000000;
-		rx_fd->end = 0xbabeface;
+		rx_fd->end = cpu_to_le32(0xbabeface);
 		rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
 		// FIXME: return value verifiee mais traitement suspect
 		if (try_get_rx_skb(dpriv, dev) >= 0)
 			dpriv->rx_dirty++;
-		(rx_fd++)->next = (u32)(dpriv->rx_fd_dma +
+		(rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
 				(++i%RX_RING_SIZE)*sizeof(*rx_fd));
 	} while (i < RX_RING_SIZE);
 
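A side note on the constants: Hold, FrameAborted, FrameEnd, DataComplete and HiDesc are pre-converted with cpu_to_le32() in the patch, so they can be OR-ed into or tested against __le32 descriptor fields without converting the field on every access; on a little-endian host the conversion is a compile-time no-op. A small hedged sketch of that idea follows (the FrameEnd value is the one used in the driver, the macro and function names are made up):

/* Sketch only -- a bit mask kept in device (little-endian) byte order can be
 * combined with an __le32 field directly. */
#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_FRAME_END cpu_to_le32(0x80000000)

static int example_frame_done(__le32 state)
{
	/* both operands are little-endian, so no per-access swap is needed */
	return (state & EXAMPLE_FRAME_END) != 0;
}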