author     Grant Likely <grant.likely@secretlab.ca>  2010-05-22 02:36:56 -0400
committer  Grant Likely <grant.likely@secretlab.ca>  2010-05-22 02:36:56 -0400
commit     cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree       113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/net/ll_temac_main.c
parent     44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent     f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/net/ll_temac_main.c')
-rw-r--r--   drivers/net/ll_temac_main.c | 159
1 file changed, 114 insertions(+), 45 deletions(-)
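
The ll_temac_main.c portion of this merge carries the change that makes the LocalLink
DMA register access method selectable at probe time: instead of assuming DCR
instructions (Virtex-5 / PowerPC 440 style designs), the driver now goes through
lp->dma_in()/lp->dma_out() function pointers, so DCR-based and memory-mapped
(PowerPC 405, MicroBlaze) systems share the same code paths. A minimal sketch of that
indirection follows; the field declarations are assumptions inferred from how the diff
uses them, since the companion header (drivers/net/ll_temac.h) is not shown on this page.

    /* Sketch only, not part of the patch: the accessor indirection as used by the
     * diff below.  Field types are inferred; the real declarations live in
     * drivers/net/ll_temac.h. */
    struct temac_local {
            /* ... existing fields ... */
            void __iomem *sdma_regs;        /* memory mapped LocalLink DMA registers */
    #ifdef CONFIG_PPC_DCR
            dcr_host_t sdma_dcrs;           /* DCR mapping used on PowerPC 440 */
    #endif
            u32 (*dma_in)(struct temac_local *lp, int reg);
            void (*dma_out)(struct temac_local *lp, int reg, u32 value);
    };

Every former temac_dma_in32()/temac_dma_out32() call site in the hunks below then reads
as lp->dma_in(lp, reg) / lp->dma_out(lp, reg, value), whichever backend was selected.
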
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index dc318330ec79..fa7620e28404 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
  * or rx, so this should be okay.
  *
  * TODO:
- * - Fix driver to work on more than just Virtex5. Right now the driver
- *   assumes that the locallink DMA registers are accessed via DCR
- *   instructions.
  * - Factor out locallink DMA code into separate driver
  * - Fix multicast assignment.
  * - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
         temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
 }

+/**
+ * temac_dma_in32 - Memory mapped DMA read, this function expects a
+ * register input that is based on DCR word addresses which
+ * are then converted to memory mapped byte addresses
+ */
 static u32 temac_dma_in32(struct temac_local *lp, int reg)
 {
-        return dcr_read(lp->sdma_dcrs, reg);
+        return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
 }

+/**
+ * temac_dma_out32 - Memory mapped DMA write, this function expects a
+ * register input that is based on DCR word addresses which
+ * are then converted to memory mapped byte addresses
+ */
 static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
 {
+        out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in32 - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+        return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out32 - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
         dcr_write(lp->sdma_dcrs, reg, value);
 }

 /**
+ * temac_dcr_setup - If the DMA is DCR based, then setup the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+                           struct device_node *np)
+{
+        unsigned int dcrs;
+
+        /* setup the dcr address mapping if it's in the device tree */
+
+        dcrs = dcr_resource_start(np, 0);
+        if (dcrs != 0) {
+                lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+                lp->dma_in = temac_dma_dcr_in;
+                lp->dma_out = temac_dma_dcr_out;
+                dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+                return 0;
+        }
+        /* no DCR in the device tree, indicate a failure */
+        return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+                           struct device_node *np)
+{
+        return -1;
+}
+
+#endif
+
+/**
  * temac_dma_bd_init - Setup buffer descriptor rings
  */
 static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
                 lp->rx_bd_v[i].next = lp->rx_bd_p +
                                 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

-                skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
-                                + XTE_ALIGN, GFP_ATOMIC);
+                skb = netdev_alloc_skb_ip_align(ndev,
+                                                XTE_MAX_JUMBO_FRAME_SIZE);
+
                 if (skb == 0) {
                         dev_err(&ndev->dev, "alloc_skb error %d\n", i);
                         return -1;
                 }
                 lp->rx_skb[i] = skb;
-                skb_reserve(skb, BUFFER_ALIGN(skb->data));
                 /* returns physical address of skb->data */
                 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
                                                      skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
                 lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
         }

-        temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+        lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
                         CHNL_CTRL_IRQ_EN |
                         CHNL_CTRL_IRQ_DLY_EN |
                         CHNL_CTRL_IRQ_COAL_EN);
         /* 0x10220483 */
         /* 0x00100483 */
-        temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+        lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 |
                         CHNL_CTRL_IRQ_EN |
                         CHNL_CTRL_IRQ_DLY_EN |
                         CHNL_CTRL_IRQ_COAL_EN |
                         CHNL_CTRL_IRQ_IOE);
         /* 0xff010283 */

-        temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
-        temac_dma_out32(lp, RX_TAILDESC_PTR,
+        lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+        lp->dma_out(lp, RX_TAILDESC_PTR,
                 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
-        temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

         return 0;
 }
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
                 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
                 dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
         } else if (!netdev_mc_empty(ndev)) {
-                struct dev_mc_list *mclist;
+                struct netdev_hw_addr *ha;

                 i = 0;
-                netdev_for_each_mc_addr(mclist, ndev) {
+                netdev_for_each_mc_addr(ha, ndev) {
                         if (i >= MULTICAST_CAM_TABLE_NUM)
                                 break;
-                        multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
-                                          (mclist->dmi_addr[2] << 16) |
-                                          (mclist->dmi_addr[1] << 8) |
-                                          (mclist->dmi_addr[0]));
+                        multi_addr_msw = ((ha->addr[3] << 24) |
+                                          (ha->addr[2] << 16) |
+                                          (ha->addr[1] << 8) |
+                                          (ha->addr[0]));
                         temac_indirect_out32(lp, XTE_MAW0_OFFSET,
                                              multi_addr_msw);
-                        multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
-                                          (mclist->dmi_addr[4]) | (i << 16));
+                        multi_addr_lsw = ((ha->addr[5] << 8) |
+                                          (ha->addr[4]) | (i << 16));
                         temac_indirect_out32(lp, XTE_MAW1_OFFSET,
                                              multi_addr_lsw);
                         i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
         temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

         /* Reset Local Link (DMA) */
-        temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
         timeout = 1000;
-        while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+        while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
                 udelay(1);
                 if (--timeout == 0) {
                         dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
                         break;
                 }
         }
-        temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+        lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

         temac_dma_bd_init(ndev);

@@ -461,7 +527,7 @@ static void temac_device_reset(struct net_device *ndev)
                 dev_err(&ndev->dev, "Error setting TEMAC options\n");

         /* Init Driver variable */
-        ndev->trans_start = 0;
+        ndev->trans_start = jiffies; /* prevent tx timeout */
 }

 void temac_adjust_link(struct net_device *ndev)
@@ -598,7 +664,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 lp->tx_bd_tail = 0;

         /* Kick off the transfer */
-        temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

         return NETDEV_TX_OK;
 }
@@ -612,7 +678,6 @@ static void ll_temac_recv(struct net_device *ndev)
         struct cdmac_bd *cur_p;
         dma_addr_t tail_p;
         int length;
-        unsigned long skb_vaddr;
         unsigned long flags;

         spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +691,7 @@ static void ll_temac_recv(struct net_device *ndev)
                 skb = lp->rx_skb[lp->rx_bd_ci];
                 length = cur_p->app4 & 0x3FFF;

-                skb_vaddr = virt_to_bus(skb->data);
-                dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+                dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
                                  DMA_FROM_DEVICE);

                 skb_put(skb, length);
@@ -640,16 +704,15 @@ static void ll_temac_recv(struct net_device *ndev)
                 ndev->stats.rx_packets++;
                 ndev->stats.rx_bytes += length;

-                new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
-                                    GFP_ATOMIC);
+                new_skb = netdev_alloc_skb_ip_align(ndev,
+                                                    XTE_MAX_JUMBO_FRAME_SIZE);
+
                 if (new_skb == 0) {
                         dev_err(&ndev->dev, "no memory for new sk_buff\n");
                         spin_unlock_irqrestore(&lp->rx_lock, flags);
                         return;
                 }

-                skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
-
                 cur_p->app0 = STS_CTRL_APP0_IRQONEND;
                 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
                                              XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +727,7 @@ static void ll_temac_recv(struct net_device *ndev)
                 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
                 bdstat = cur_p->app0;
         }
-        temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+        lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

         spin_unlock_irqrestore(&lp->rx_lock, flags);
 }
@@ -675,8 +738,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
         struct temac_local *lp = netdev_priv(ndev);
         unsigned int status;

-        status = temac_dma_in32(lp, TX_IRQ_REG);
-        temac_dma_out32(lp, TX_IRQ_REG, status);
+        status = lp->dma_in(lp, TX_IRQ_REG);
+        lp->dma_out(lp, TX_IRQ_REG, status);

         if (status & (IRQ_COAL | IRQ_DLY))
                 temac_start_xmit_done(lp->ndev);
@@ -693,8 +756,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
         unsigned int status;

         /* Read and clear the status registers */
-        status = temac_dma_in32(lp, RX_IRQ_REG);
-        temac_dma_out32(lp, RX_IRQ_REG, status);
+        status = lp->dma_in(lp, RX_IRQ_REG);
+        lp->dma_out(lp, RX_IRQ_REG, status);

         if (status & (IRQ_COAL | IRQ_DLY))
                 ll_temac_recv(lp->ndev);
@@ -795,7 +858,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
         int i, len = 0;

         for (i = 0; i < 0x11; i++)
-                len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+                len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
                                (i % 8) == 7 ? "\n" : " ");
         len += sprintf(buf + len, "\n");

@@ -821,7 +884,6 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
         struct net_device *ndev;
         const void *addr;
         int size, rc = 0;
-        unsigned int dcrs;

         /* Init network device structure */
         ndev = alloc_etherdev(sizeof(*lp));
@@ -871,13 +933,20 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
                 goto nodev;
         }

-        dcrs = dcr_resource_start(np, 0);
-        if (dcrs == 0) {
-                dev_err(&op->dev, "could not get DMA register address\n");
-                goto nodev;
+        /* Setup the DMA register accesses, could be DCR or memory mapped */
+        if (temac_dcr_setup(lp, op, np)) {
+
+                /* no DCR in the device tree, try non-DCR */
+                lp->sdma_regs = of_iomap(np, 0);
+                if (lp->sdma_regs) {
+                        lp->dma_in = temac_dma_in32;
+                        lp->dma_out = temac_dma_out32;
+                        dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+                } else {
+                        dev_err(&op->dev, "unable to map DMA registers\n");
+                        goto nodev;
+                }
         }
-        lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
-        dev_dbg(&op->dev, "DCR base: %x\n", dcrs);

         lp->rx_irq = irq_of_parse_and_map(np, 0);
         lp->tx_irq = irq_of_parse_and_map(np, 1);
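
To read the probe hunk above at a glance: DCR wins when it is available, memory mapped
I/O is the fallback. temac_dcr_setup() returns 0 only when CONFIG_PPC_DCR is enabled and
the device tree actually provides a DCR resource; any non-zero return sends the probe
routine down the of_iomap() path. A condensed restatement of that flow, simplified from
the hunks above (error handling trimmed, not literal driver code):

    /* Simplified sketch of the probe-time selection and a call site. */
    if (temac_dcr_setup(lp, op, np)) {              /* non-zero: no DCR resource */
            lp->sdma_regs = of_iomap(np, 0);        /* fall back to memory mapped */
            if (!lp->sdma_regs)
                    goto nodev;                     /* neither access method available */
            lp->dma_in  = temac_dma_in32;           /* in_be32(sdma_regs + (reg << 2)) */
            lp->dma_out = temac_dma_out32;          /* out_be32(sdma_regs + (reg << 2)) */
    }

    /* After selection, both backends look the same to the rest of the driver: */
    lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
    while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST)
            udelay(1);                              /* the real loop is bounded by a timeout */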