Diffstat (limited to 'drivers/net/ethoc.c')
-rw-r--r--	drivers/net/ethoc.c	104
1 file changed, 67 insertions, 37 deletions
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b7311bc00258..f7d9ac8324cb 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -17,8 +17,13 @@
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 #include <net/ethoc.h>
 
+static int buffer_size = 0x8000; /* 32 KBytes */
+module_param(buffer_size, int, 0);
+MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
+
 /* register offsets */
 #define MODER		0x00
 #define INT_SOURCE	0x04
@@ -167,6 +172,7 @@
  * struct ethoc - driver-private device structure
  * @iobase:	pointer to I/O memory region
  * @membase:	pointer to buffer memory region
+ * @dma_alloc:	dma allocated buffer size
  * @num_tx:	number of send buffers
  * @cur_tx:	last send buffer written
  * @dty_tx:	last buffer actually sent
@@ -185,6 +191,7 @@
 struct ethoc {
 	void __iomem *iobase;
 	void __iomem *membase;
+	int dma_alloc;
 
 	unsigned int num_tx;
 	unsigned int cur_tx;
@@ -216,24 +223,25 @@ struct ethoc_bd {
 	u32 addr;
 };
 
-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
 	return ioread32(dev->iobase + offset);
 }
 
-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
 	iowrite32(data, dev->iobase + offset);
 }
 
-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+		struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
 	bd->stat = ethoc_read(dev, offset + 0);
 	bd->addr = ethoc_read(dev, offset + 4);
 }
 
-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
 		const struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -241,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
 	ethoc_write(dev, offset + 4, bd->addr);
 }
 
-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask |= mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask &= ~mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
 {
 	ethoc_write(dev, INT_SOURCE, mask);
 }
 
-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode |= MODER_RXEN | MODER_TXEN;
 	ethoc_write(dev, MODER, mode);
 }
 
-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -284,7 +292,7 @@ static int ethoc_init_ring(struct ethoc *dev)
 	dev->cur_rx = 0;
 
 	/* setup transmission buffers */
-	bd.addr = 0;
+	bd.addr = virt_to_phys(dev->membase);
 	bd.stat = TX_BD_IRQ | TX_BD_CRC;
 
 	for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +303,6 @@ static int ethoc_init_ring(struct ethoc *dev)
 		bd.addr += ETHOC_BUFSIZ;
 	}
 
-	bd.addr = dev->num_tx * ETHOC_BUFSIZ;
 	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
 
 	for (i = 0; i < dev->num_rx; i++) {
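
Note on the two ethoc_init_ring hunks above: with bd.addr seeded from virt_to_phys(dev->membase) and the explicit RX re-seed dropped, the descriptors now carry physical addresses, and bd.addr simply keeps advancing past the last TX buffer, so the RX buffers follow the TX buffers contiguously in the same region. A minimal userspace sketch of that address arithmetic (illustrative only, not driver code; the 1536-byte size and the base address are stand-ins for ETHOC_BUFSIZ and the real buffer):

	#include <stdio.h>
	#include <stdint.h>

	#define BUF_PER_BD 1536			/* stand-in for ETHOC_BUFSIZ */

	int main(void)
	{
		uintptr_t membase_phys = 0x10000000;	/* pretend virt_to_phys(membase) */
		unsigned int num_tx = 4, num_rx = 8;
		uintptr_t addr = membase_phys;
		unsigned int i;

		for (i = 0; i < num_tx; i++, addr += BUF_PER_BD)
			printf("TX bd[%u].addr = 0x%lx\n", i, (unsigned long)addr);
		/* no reset of addr: RX buffers start right after the last TX buffer */
		for (i = 0; i < num_rx; i++, addr += BUF_PER_BD)
			printf("RX bd[%u].addr = 0x%lx\n", i, (unsigned long)addr);
		return 0;
	}
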
@@ -400,8 +407,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
 			struct sk_buff *skb = netdev_alloc_skb(dev, size);
+
+			size -= 4; /* strip the CRC */
+			skb_reserve(skb, 2); /* align TCP/IP header */
+
 			if (likely(skb)) {
-				void *src = priv->membase + bd.addr;
+				void *src = phys_to_virt(bd.addr);
 				memcpy_fromio(skb_put(skb, size), src, size);
 				skb->protocol = eth_type_trans(skb, dev);
 				priv->stats.rx_packets++;
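
The two added lines above drop the 4-byte CRC that is included in the length taken from bd.stat, and shift the packet start by two bytes so that, after the 14-byte Ethernet header, the IP header lands on a 4-byte boundary. A small standalone illustration of that alignment arithmetic (not driver code):

	#include <stdio.h>

	int main(void)
	{
		const int eth_hlen = 14;	/* untagged Ethernet header */
		int pad;

		for (pad = 0; pad <= 2; pad += 2)
			printf("reserve %d -> IP header at offset %d (%s)\n",
			       pad, pad + eth_hlen,
			       (pad + eth_hlen) % 4 == 0 ? "4-byte aligned" : "unaligned");
		return 0;
	}
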
@@ -498,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
-	ethoc_ack_irq(priv, INT_MASK_ALL);
+	ethoc_ack_irq(priv, pending);
 
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
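
Acknowledging only the causes actually read from INT_SOURCE, rather than INT_MASK_ALL, presumably avoids clearing an interrupt cause that is raised between the read and the acknowledge; such an event now stays pending and re-raises the interrupt. A toy write-1-to-clear model of the difference (illustrative only; bit values are made up):

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t int_source;	/* model of a write-1-to-clear status register */

	static uint32_t read_status(void) { return int_source; }
	static void ack(uint32_t mask) { int_source &= ~mask; }

	int main(void)
	{
		/* targeted ack: an event raised after the read stays pending */
		int_source = 0x1;		/* RX frame */
		uint32_t pending = read_status();
		int_source |= 0x2;		/* TX done arrives after the read */
		ack(pending);
		printf("ack(pending):      still pending = 0x%x\n", int_source);	/* 0x2 */

		/* blanket ack: the late event is cleared without being handled */
		int_source = 0x1;
		pending = read_status();
		int_source |= 0x2;
		ack(~0u);
		printf("ack(INT_MASK_ALL): still pending = 0x%x\n", int_source);	/* 0x0 */
		return 0;
	}
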
@@ -653,9 +664,10 @@ static int ethoc_open(struct net_device *dev)
 	if (ret)
 		return ret;
 
-	/* calculate the number of TX/RX buffers */
-	num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
-	priv->num_tx = min(min_tx, num_bd / 4);
+	/* calculate the number of TX/RX buffers, maximum 128 supported */
+	num_bd = min_t(unsigned int,
+		128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
+	priv->num_tx = max(min_tx, num_bd / 4);
 	priv->num_rx = num_bd - priv->num_tx;
 	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
 
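
Worked example of the new buffer accounting, as a standalone sketch: assuming ETHOC_BUFSIZ is 1536 bytes and min_tx is 2 (both assumptions here, not quoted from the file), the default 32 KiB DMA buffer yields 21 descriptors, 5 for TX and 16 for RX:

	#include <stdio.h>

	#define ETHOC_BUFSIZ	1536	/* assumed per-descriptor buffer size */

	static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }
	static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }

	int main(void)
	{
		unsigned long mem_start = 0, mem_end = 0x8000 - 1;	/* default 32 KiB buffer */
		unsigned int min_tx = 2;				/* assumed minimum TX count */

		unsigned int num_bd = umin(128, (mem_end - mem_start + 1) / ETHOC_BUFSIZ);
		unsigned int num_tx = umax(min_tx, num_bd / 4);
		unsigned int num_rx = num_bd - num_tx;

		/* with a 32 KiB buffer: num_bd = 21, num_tx = 5, num_rx = 16 */
		printf("num_bd=%u num_tx=%u num_rx=%u\n", num_bd, num_tx, num_rx);
		return 0;
	}
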
@@ -823,7 +835,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		bd.stat &= ~TX_BD_PAD;
 
-	dest = priv->membase + bd.addr;
+	dest = phys_to_virt(bd.addr);
 	memcpy_toio(dest, skb->data, skb->len);
 
 	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +915,19 @@ static int ethoc_probe(struct platform_device *pdev)
 
 	/* obtain buffer memory space */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "cannot obtain memory space\n");
-		ret = -ENXIO;
-		goto free;
-	}
-
-	mem = devm_request_mem_region(&pdev->dev, res->start,
+	if (res) {
+		mem = devm_request_mem_region(&pdev->dev, res->start,
 			res->end - res->start + 1, res->name);
 	if (!mem) {
 		dev_err(&pdev->dev, "cannot request memory space\n");
 		ret = -ENXIO;
 		goto free;
+	}
+
+	netdev->mem_start = mem->start;
+	netdev->mem_end = mem->end;
 	}
 
-	netdev->mem_start = mem->start;
-	netdev->mem_end = mem->end;
 
 	/* obtain device IRQ number */
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +942,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	/* setup driver-private data */
 	priv = netdev_priv(netdev);
 	priv->netdev = netdev;
+	priv->dma_alloc = 0;
 
 	priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
 			mmio->end - mmio->start + 1);
@@ -942,12 +952,27 @@ static int ethoc_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
-			mem->end - mem->start + 1);
-	if (!priv->membase) {
-		dev_err(&pdev->dev, "cannot remap memory space\n");
-		ret = -ENXIO;
-		goto error;
+	if (netdev->mem_end) {
+		priv->membase = devm_ioremap_nocache(&pdev->dev,
+			netdev->mem_start, mem->end - mem->start + 1);
+		if (!priv->membase) {
+			dev_err(&pdev->dev, "cannot remap memory space\n");
+			ret = -ENXIO;
+			goto error;
+		}
+	} else {
+		/* Allocate buffer memory */
+		priv->membase = dma_alloc_coherent(NULL,
+			buffer_size, (void *)&netdev->mem_start,
+			GFP_KERNEL);
+		if (!priv->membase) {
+			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
+				buffer_size);
+			ret = -ENOMEM;
+			goto error;
+		}
+		netdev->mem_end = netdev->mem_start + buffer_size;
+		priv->dma_alloc = buffer_size;
 	}
 
 	/* Allow the platform setup code to pass in a MAC address. */
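
With this fallback, a platform can register the device with only the register window and the IRQ; when the second IORESOURCE_MEM is absent, the driver allocates its packet buffer itself with dma_alloc_coherent(buffer_size). A hedged board-code sketch of such a registration (addresses, the IRQ number and the initcall are placeholders; it assumes the platform driver matches on the name "ethoc"):

	#include <linux/init.h>
	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	/* Only the MAC register window and the interrupt are provided; with no
	 * second IORESOURCE_MEM the driver takes the dma_alloc_coherent() path. */
	static struct resource ethoc_resources[] = {
		{
			.start	= 0x90000000,		/* register window, placeholder */
			.end	= 0x90000000 + 0x7ff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 5,			/* interrupt line, placeholder */
			.end	= 5,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static int __init board_add_ethoc(void)
	{
		struct platform_device *pdev;

		pdev = platform_device_register_simple("ethoc", -1,
				ethoc_resources, ARRAY_SIZE(ethoc_resources));
		if (IS_ERR(pdev))
			return PTR_ERR(pdev);
		return 0;
	}
	device_initcall(board_add_ethoc);
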
@@ -1034,6 +1059,9 @@ free_mdio:
 	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 free:
+	if (priv->dma_alloc)
+		dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+			netdev->mem_start);
 	free_netdev(netdev);
 out:
 	return ret;
@@ -1059,7 +1087,9 @@ static int ethoc_remove(struct platform_device *pdev)
 		kfree(priv->mdio->irq);
 		mdiobus_free(priv->mdio);
 	}
-
+	if (priv->dma_alloc)
+		dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+			netdev->mem_start);
 	unregister_netdev(netdev);
 	free_netdev(netdev);
 }