aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/video/cfbfillrect.c
blob: ba9f58b2a5e86e65a29aa3cabc67d45087b933b9 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
/*
 *  Generic fillrect for frame buffers with packed pixels of any depth.
 *
 *      Copyright (C)  2000 James Simmons (jsimmons@linux-fbdev.org)
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 *
 * NOTES:
 *
 *  Also need to add code to deal with cards whose endianness differs from
 *  that of the native CPU. The MSB position within a word also still needs
 *  to be handled.
 *
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#include "fb_draw.h"

#if BITS_PER_LONG == 32
#  define FB_WRITEL fb_writel
#  define FB_READL  fb_readl
#else
#  define FB_WRITEL fb_writeq
#  define FB_READL  fb_readq
#endif

    /*
     *  Aligned pattern fill using 32/64-bit memory accesses
     */

/*
 * bitfill_aligned - fill a pixel span with a pattern that repeats every long
 * @p:         frame buffer info
 * @dst:       long-aligned base address of the destination
 * @dst_idx:   bit offset of the first pixel within *dst
 * @pat:       fill pattern, already expanded to a full long
 * @n:         span length in bits
 * @bits:      number of bits per long (BITS_PER_LONG)
 * @bswapmask: byte-swap correction mask for foreign-endian framebuffers
 *
 * Partial words at either end of the span are merged with the existing
 * framebuffer contents via read-modify-write; the whole words in between
 * are written directly.
 */
static void
bitfill_aligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
		unsigned long pat, unsigned n, int bits, u32 bswapmask)
{
	unsigned long start_mask, end_mask;

	if (!n)
		return;

	/* Masks selecting the pixels touched in the first and last word */
	start_mask = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
	end_mask = ~fb_shifted_pixels_mask_long(p, (dst_idx + n) % bits,
						bswapmask);

	if (dst_idx + n <= bits) {
		/* Whole span fits in a single word */
		if (end_mask)
			start_mask &= end_mask;
		FB_WRITEL(comp(pat, FB_READL(dst), start_mask), dst);
		return;
	}

	/* Leading partial word, unless the span starts word-aligned */
	if (start_mask != ~0UL) {
		FB_WRITEL(comp(pat, FB_READL(dst), start_mask), dst);
		dst++;
		n -= bits - dst_idx;
	}

	/* Main chunk of whole words, unrolled eight at a time */
	n /= bits;
	for (; n >= 8; n -= 8) {
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
		FB_WRITEL(pat, dst++);
	}
	for (; n > 0; n--)
		FB_WRITEL(pat, dst++);

	/* Trailing partial word, unless the span ends word-aligned */
	if (end_mask)
		FB_WRITEL(comp(pat, FB_READL(dst), end_mask), dst);
}


    /*
     *  Unaligned generic pattern fill using 32/64-bit memory accesses
     *  The pattern must have been expanded to a full 32/64-bit value
     *  Left/right are the appropriate shifts to convert to the pattern to be
     *  used for the next 32/64-bit word
     */

/*
 * bitfill_unaligned - fill a pixel span with a pattern whose period does
 *                     not divide the word size
 * @p:       frame buffer info
 * @dst:     long-aligned base address of the destination
 * @dst_idx: bit offset of the first pixel within *dst
 * @pat:     fill pattern, already expanded to a full long
 * @left:    left rotation, in bits, advancing @pat to the next word
 * @right:   right rotation complementing @left (left + right == pattern size)
 * @n:       span length in bits
 * @bits:    number of bits per long (BITS_PER_LONG)
 *
 * The pattern is rotated by left/right after every word written so that it
 * stays phase-aligned with the pixels.  Partial words at either end are
 * merged with the existing contents via read-modify-write.
 */
static void
bitfill_unaligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
		  unsigned long pat, int left, int right, unsigned n, int bits)
{
	unsigned long start_mask, end_mask;

	if (!n)
		return;

	/* Masks selecting the pixels touched in the first and last word */
	start_mask = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
	end_mask = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx + n) % bits));

	if (dst_idx + n <= bits) {
		/* Whole span fits in a single word */
		if (end_mask)
			start_mask &= end_mask;
		FB_WRITEL(comp(pat, FB_READL(dst), start_mask), dst);
		return;
	}

	/* Leading partial word */
	if (start_mask) {
		FB_WRITEL(comp(pat, FB_READL(dst), start_mask), dst);
		dst++;
		pat = pat << left | pat >> right;
		n -= bits - dst_idx;
	}

	/* Main chunk of whole words, unrolled four at a time */
	n /= bits;
	for (; n >= 4; n -= 4) {
		FB_WRITEL(pat, dst++);
		pat = pat << left | pat >> right;
		FB_WRITEL(pat, dst++);
		pat = pat << left | pat >> right;
		FB_WRITEL(pat, dst++);
		pat = pat << left | pat >> right;
		FB_WRITEL(pat, dst++);
		pat = pat << left | pat >> right;
	}
	for (; n > 0; n--) {
		FB_WRITEL(pat, dst++);
		pat = pat << left | pat >> right;
	}

	/* Trailing partial word */
	if (end_mask)
		FB_WRITEL(comp(pat, FB_READL(dst), end_mask), dst);
}

    /*
     *  Aligned pattern invert using 32/64-bit memory accesses
     */
/*
 * bitfill_aligned_rev - XOR-invert a pixel span when the pattern repeats
 *                       every long
 * @p:         frame buffer info
 * @dst:       long-aligned base address of the destination
 * @dst_idx:   bit offset of the first pixel within *dst
 * @pat:       invert pattern, already expanded to a full long
 * @n:         span length in bits
 * @bits:      number of bits per long (BITS_PER_LONG)
 * @bswapmask: byte-swap correction mask for foreign-endian framebuffers
 *
 * Every destination word is read, XORed with the pattern and written back;
 * the first/last masks confine the effect to the requested pixels in the
 * partial end words.
 *
 * Cleanup: the old redundant local alias `val` of @pat has been dropped;
 * @pat is used directly, matching bitfill_unaligned_rev().
 */
static void
bitfill_aligned_rev(struct fb_info *p, unsigned long __iomem *dst,
		    int dst_idx, unsigned long pat, unsigned n, int bits,
		    u32 bswapmask)
{
	unsigned long first, last, dat;

	if (!n)
		return;

	/* Masks selecting the pixels touched in the first and last word */
	first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
	last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		dat = FB_READL(dst);
		FB_WRITEL(comp(dat ^ pat, dat, first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first != 0UL) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, first), dst);
			dst++;
			n -= bits - dst_idx;
		}

		// Main chunk: whole words, read-XOR-write, unrolled by eight
		n /= bits;
		while (n >= 8) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			n -= 8;
		}
		while (n--) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
		}
		// Trailing bits
		if (last) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, last), dst);
		}
	}
}


    /*
     *  Unaligned generic pattern invert using 32/64-bit memory accesses
     *  The pattern must have been expanded to a full 32/64-bit value
     *  Left/right are the appropriate shifts to convert to the pattern to be
     *  used for the next 32/64-bit word
     */

static void
bitfill_unaligned_rev(struct fb_info *p, unsigned long __iomem *dst,
		      int dst_idx, unsigned long pat, int left, int right,
		      unsigned n, int bits)
{
	unsigned long first, last, dat;

	if (!n)
		return;

	/* Masks selecting the pixels touched in the first and last word */
	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		dat = FB_READL(dst);
		FB_WRITEL(comp(dat ^ pat, dat, first), dst);
	} else {
		// Multiple destination words

		// Leading bits
		if (first != 0UL) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, first), dst);
			dst++;
			/* rotate pattern so it stays phase-aligned with the
			 * pixels of the next word */
			pat = pat << left | pat >> right;
			n -= bits - dst_idx;
		}

		// Main chunk: whole words, read-XOR-write, unrolled by four
		n /= bits;
		while (n >= 4) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			n -= 4;
		}
		while (n--) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
		}

		// Trailing bits
		if (last) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, last), dst);
		}
	}
}

/**
 * cfb_fillrect - generic software fill for packed-pixel framebuffers
 * @p:    frame buffer info
 * @rect: rectangle to fill (dx, dy, width, height in pixels; color; rop)
 *
 * Expands the fill color into a long-sized pattern and fills the rectangle
 * one scanline at a time, dispatching to the aligned helpers when the pixel
 * size divides BITS_PER_LONG and to the rotating unaligned helpers
 * otherwise.  Supports ROP_COPY and ROP_XOR; unknown rops fall back to
 * ROP_COPY with an error message.
 */
void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	unsigned long pat, pat2, fg;
	unsigned long width = rect->width, height = rect->height;
	int bits = BITS_PER_LONG, bytes = bits >> 3;
	u32 bpp = p->var.bits_per_pixel;
	unsigned long __iomem *dst;
	int dst_idx, left;

	if (p->state != FBINFO_STATE_RUNNING)
		return;

	/* For (direct-)truecolor visuals the color is an index into the
	 * pseudo palette; otherwise it is the raw pixel value. */
	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
	    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
		fg = ((u32 *) (p->pseudo_palette))[rect->color];
	else
		fg = rect->color;

	/* Replicate the pixel value across a full long */
	pat = pixel_to_pat(bpp, fg);

	/* Round the start address down to a long boundary and carry the
	 * remainder (in bits) in dst_idx, then add the rectangle origin. */
	dst = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
	dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
	dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
	/* FIXME For now we support 1-32 bpp only */
	left = bits % bpp;
	if (p->fbops->fb_sync)
		p->fbops->fb_sync(p);
	if (!left) {
		/* bpp divides BITS_PER_LONG: pattern repeats every word */
		u32 bswapmask = fb_compute_bswapmask(p);
		void (*fill_op32)(struct fb_info *p,
				  unsigned long __iomem *dst, int dst_idx,
		                  unsigned long pat, unsigned n, int bits,
				  u32 bswapmask) = NULL;

		switch (rect->rop) {
		case ROP_XOR:
			fill_op32 = bitfill_aligned_rev;
			break;
		case ROP_COPY:
			fill_op32 = bitfill_aligned;
			break;
		default:
			printk( KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
			fill_op32 = bitfill_aligned;
			break;
		}
		while (height--) {
			/* Fold whole words out of dst_idx into dst */
			dst += dst_idx >> (ffs(bits) - 1);
			dst_idx &= (bits - 1);
			fill_op32(p, dst, dst_idx, pat, width*bpp, bits,
				  bswapmask);
			dst_idx += p->fix.line_length*8;
		}
	} else {
		/* bpp does not divide BITS_PER_LONG: the pattern must be
		 * rotated by left/right bits for every word written */
		int right, r;
		void (*fill_op)(struct fb_info *p, unsigned long __iomem *dst,
				int dst_idx, unsigned long pat, int left,
				int right, unsigned n, int bits) = NULL;
#ifdef __LITTLE_ENDIAN
		right = left;
		left = bpp - right;
#else
		right = bpp - left;
#endif
		switch (rect->rop) {
		case ROP_XOR:
			fill_op = bitfill_unaligned_rev;
			break;
		case ROP_COPY:
			fill_op = bitfill_unaligned;
			break;
		default:
			printk(KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
			fill_op = bitfill_unaligned;
			break;
		}
		while (height--) {
			dst += dst_idx / bits;
			dst_idx &= (bits - 1);
			r = dst_idx % bpp;
			/* rotate pattern to the correct start position */
			pat2 = le_long_to_cpu(rolx(cpu_to_le_long(pat), r, bpp));
			fill_op(p, dst, dst_idx, pat2, left, right,
				width*bpp, bits);
			dst_idx += p->fix.line_length*8;
		}
	}
}

EXPORT_SYMBOL(cfb_fillrect);

MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
MODULE_DESCRIPTION("Generic software accelerated fill rectangle");
MODULE_LICENSE("GPL");
="hl opt">*membase; unsigned int num_tx; unsigned int cur_tx; unsigned int dty_tx; unsigned int num_rx; unsigned int cur_rx; struct net_device *netdev; struct napi_struct napi; struct net_device_stats stats; u32 msg_enable; spinlock_t rx_lock; spinlock_t lock; struct phy_device *phy; struct mii_bus *mdio; s8 phy_id; }; /** * struct ethoc_bd - buffer descriptor * @stat: buffer statistics * @addr: physical memory address */ struct ethoc_bd { u32 stat; u32 addr; }; static u32 ethoc_read(struct ethoc *dev, loff_t offset) { return ioread32(dev->iobase + offset); } static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) { iowrite32(data, dev->iobase + offset); } static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); bd->stat = ethoc_read(dev, offset + 0); bd->addr = ethoc_read(dev, offset + 4); } static void ethoc_write_bd(struct ethoc *dev, int index, const struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); ethoc_write(dev, offset + 0, bd->stat); ethoc_write(dev, offset + 4, bd->addr); } static void ethoc_enable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask |= mask; ethoc_write(dev, INT_MASK, imask); } static void ethoc_disable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask &= ~mask; ethoc_write(dev, INT_MASK, imask); } static void ethoc_ack_irq(struct ethoc *dev, u32 mask) { ethoc_write(dev, INT_SOURCE, mask); } static void ethoc_enable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode |= MODER_RXEN | MODER_TXEN; ethoc_write(dev, MODER, mode); } static void ethoc_disable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode &= ~(MODER_RXEN | MODER_TXEN); ethoc_write(dev, MODER, mode); } static int ethoc_init_ring(struct ethoc *dev) { struct ethoc_bd bd; int i; dev->cur_tx = 0; dev->dty_tx = 0; dev->cur_rx = 0; /* setup 
transmission buffers */ bd.addr = 0; bd.stat = TX_BD_IRQ | TX_BD_CRC; for (i = 0; i < dev->num_tx; i++) { if (i == dev->num_tx - 1) bd.stat |= TX_BD_WRAP; ethoc_write_bd(dev, i, &bd); bd.addr += ETHOC_BUFSIZ; } bd.addr = dev->num_tx * ETHOC_BUFSIZ; bd.stat = RX_BD_EMPTY | RX_BD_IRQ; for (i = 0; i < dev->num_rx; i++) { if (i == dev->num_rx - 1) bd.stat |= RX_BD_WRAP; ethoc_write_bd(dev, dev->num_tx + i, &bd); bd.addr += ETHOC_BUFSIZ; } return 0; } static int ethoc_reset(struct ethoc *dev) { u32 mode; /* TODO: reset controller? */ ethoc_disable_rx_and_tx(dev); /* TODO: setup registers */ /* enable FCS generation and automatic padding */ mode = ethoc_read(dev, MODER); mode |= MODER_CRC | MODER_PAD; ethoc_write(dev, MODER, mode); /* set full-duplex mode */ mode = ethoc_read(dev, MODER); mode |= MODER_FULLD; ethoc_write(dev, MODER, mode); ethoc_write(dev, IPGT, 0x15); ethoc_ack_irq(dev, INT_MASK_ALL); ethoc_enable_irq(dev, INT_MASK_ALL); ethoc_enable_rx_and_tx(dev); return 0; } static unsigned int ethoc_update_rx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; unsigned int ret = 0; if (bd->stat & RX_BD_TL) { dev_err(&netdev->dev, "RX: frame too long\n"); dev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_SF) { dev_err(&netdev->dev, "RX: frame too short\n"); dev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_DN) { dev_err(&netdev->dev, "RX: dribble nibble\n"); dev->stats.rx_frame_errors++; } if (bd->stat & RX_BD_CRC) { dev_err(&netdev->dev, "RX: wrong CRC\n"); dev->stats.rx_crc_errors++; ret++; } if (bd->stat & RX_BD_OR) { dev_err(&netdev->dev, "RX: overrun\n"); dev->stats.rx_over_errors++; ret++; } if (bd->stat & RX_BD_MISS) dev->stats.rx_missed_errors++; if (bd->stat & RX_BD_LC) { dev_err(&netdev->dev, "RX: late collision\n"); dev->stats.collisions++; ret++; } return ret; } static int ethoc_rx(struct net_device *dev, int limit) { struct ethoc *priv = netdev_priv(dev); int count; for (count = 0; count < limit; 
++count) { unsigned int entry; struct ethoc_bd bd; entry = priv->num_tx + (priv->cur_rx % priv->num_rx); ethoc_read_bd(priv, entry, &bd); if (bd.stat & RX_BD_EMPTY) break; if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; struct sk_buff *skb = netdev_alloc_skb(dev, size); if (likely(skb)) { void *src = priv->membase + bd.addr; memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); dev->last_rx = jiffies; priv->stats.rx_packets++; priv->stats.rx_bytes += size; netif_receive_skb(skb); } else { if (net_ratelimit()) dev_warn(&dev->dev, "low on memory - " "packet dropped\n"); priv->stats.rx_dropped++; break; } } /* clear the buffer descriptor so it can be reused */ bd.stat &= ~RX_BD_STATS; bd.stat |= RX_BD_EMPTY; ethoc_write_bd(priv, entry, &bd); priv->cur_rx++; } return count; } static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; if (bd->stat & TX_BD_LC) { dev_err(&netdev->dev, "TX: late collision\n"); dev->stats.tx_window_errors++; } if (bd->stat & TX_BD_RL) { dev_err(&netdev->dev, "TX: retransmit limit\n"); dev->stats.tx_aborted_errors++; } if (bd->stat & TX_BD_UR) { dev_err(&netdev->dev, "TX: underrun\n"); dev->stats.tx_fifo_errors++; } if (bd->stat & TX_BD_CS) { dev_err(&netdev->dev, "TX: carrier sense lost\n"); dev->stats.tx_carrier_errors++; } if (bd->stat & TX_BD_STATS) dev->stats.tx_errors++; dev->stats.collisions += (bd->stat >> 4) & 0xf; dev->stats.tx_bytes += bd->stat >> 16; dev->stats.tx_packets++; return 0; } static void ethoc_tx(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); spin_lock(&priv->lock); while (priv->dty_tx != priv->cur_tx) { unsigned int entry = priv->dty_tx % priv->num_tx; struct ethoc_bd bd; ethoc_read_bd(priv, entry, &bd); if (bd.stat & TX_BD_READY) break; entry = (++priv->dty_tx) % priv->num_tx; (void)ethoc_update_tx_stats(priv, &bd); } if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) 
netif_wake_queue(dev); ethoc_ack_irq(priv, INT_MASK_TX); spin_unlock(&priv->lock); } static irqreturn_t ethoc_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct ethoc *priv = netdev_priv(dev); u32 pending; ethoc_disable_irq(priv, INT_MASK_ALL); pending = ethoc_read(priv, INT_SOURCE); if (unlikely(pending == 0)) { ethoc_enable_irq(priv, INT_MASK_ALL); return IRQ_NONE; } ethoc_ack_irq(priv, INT_MASK_ALL); if (pending & INT_MASK_BUSY) { dev_err(&dev->dev, "packet dropped\n"); priv->stats.rx_dropped++; } if (pending & INT_MASK_RX) { if (napi_schedule_prep(&priv->napi)) __napi_schedule(&priv->napi); } else { ethoc_enable_irq(priv, INT_MASK_RX); } if (pending & INT_MASK_TX) ethoc_tx(dev); ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX); return IRQ_HANDLED; } static int ethoc_get_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; u32 reg; reg = ethoc_read(priv, MAC_ADDR0); mac[2] = (reg >> 24) & 0xff; mac[3] = (reg >> 16) & 0xff; mac[4] = (reg >> 8) & 0xff; mac[5] = (reg >> 0) & 0xff; reg = ethoc_read(priv, MAC_ADDR1); mac[0] = (reg >> 8) & 0xff; mac[1] = (reg >> 0) & 0xff; return 0; } static int ethoc_poll(struct napi_struct *napi, int budget) { struct ethoc *priv = container_of(napi, struct ethoc, napi); int work_done = 0; work_done = ethoc_rx(priv->netdev, budget); if (work_done < budget) { ethoc_enable_irq(priv, INT_MASK_RX); napi_complete(napi); } return work_done; } static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) { unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT; struct ethoc *priv = bus->priv; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); while (time_before(jiffies, timeout)) { u32 status = ethoc_read(priv, MIISTATUS); if (!(status & MIISTATUS_BUSY)) { u32 data = ethoc_read(priv, MIIRX_DATA); /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return data; } 
schedule(); } return -EBUSY; } static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT; struct ethoc *priv = bus->priv; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); ethoc_write(priv, MIITX_DATA, val); ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); while (time_before(jiffies, timeout)) { u32 stat = ethoc_read(priv, MIISTATUS); if (!(stat & MIISTATUS_BUSY)) return 0; schedule(); } return -EBUSY; } static int ethoc_mdio_reset(struct mii_bus *bus) { return 0; } static void ethoc_mdio_poll(struct net_device *dev) { } static int ethoc_mdio_probe(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phy; int i; for (i = 0; i < PHY_MAX_ADDR; i++) { phy = priv->mdio->phy_map[i]; if (phy) { if (priv->phy_id != -1) { /* attach to specified PHY */ if (priv->phy_id == phy->addr) break; } else { /* autoselect PHY if none was specified */ if (phy->addr != 0) break; } } } if (!phy) { dev_err(&dev->dev, "no PHY found\n"); return -ENXIO; } phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy)) { dev_err(&dev->dev, "could not attach to PHY\n"); return PTR_ERR(phy); } priv->phy = phy; return 0; } static int ethoc_open(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); unsigned int min_tx = 2; unsigned int num_bd; int ret; ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; /* calculate the number of TX/RX buffers */ num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; priv->num_tx = min(min_tx, num_bd / 4); priv->num_rx = num_bd - priv->num_tx; ethoc_write(priv, TX_BD_NUM, priv->num_tx); ethoc_init_ring(priv); ethoc_reset(priv); if (netif_queue_stopped(dev)) { dev_dbg(&dev->dev, " resuming queue\n"); netif_wake_queue(dev); } else { dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); } phy_start(priv->phy); napi_enable(&priv->napi); if 
(netif_msg_ifup(priv)) { dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", dev->base_addr, dev->mem_start, dev->mem_end); } return 0; } static int ethoc_stop(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); napi_disable(&priv->napi); if (priv->phy) phy_stop(priv->phy); ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); return 0; } static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ethoc *priv = netdev_priv(dev); struct mii_ioctl_data *mdio = if_mii(ifr); struct phy_device *phy = NULL; if (!netif_running(dev)) return -EINVAL; if (cmd != SIOCGMIIPHY) { if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; phy = priv->mdio->phy_map[mdio->phy_id]; if (!phy) return -ENODEV; } else { phy = priv->phy; } return phy_mii_ioctl(phy, mdio, cmd); } static int ethoc_config(struct net_device *dev, struct ifmap *map) { return -ENOSYS; } static int ethoc_set_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); return 0; } static void ethoc_set_multicast_list(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 mode = ethoc_read(priv, MODER); struct dev_mc_list *mc = NULL; u32 hash[2] = { 0, 0 }; /* set loopback mode if requested */ if (dev->flags & IFF_LOOPBACK) mode |= MODER_LOOP; else mode &= ~MODER_LOOP; /* receive broadcast frames if requested */ if (dev->flags & IFF_BROADCAST) mode &= ~MODER_BRO; else mode |= MODER_BRO; /* enable promiscuous mode if requested */ if (dev->flags & IFF_PROMISC) mode |= MODER_PRO; else mode &= ~MODER_PRO; ethoc_write(priv, MODER, mode); /* receive multicast frames */ if (dev->flags & IFF_ALLMULTI) { hash[0] = 0xffffffff; hash[1] = 0xffffffff; } else { for (mc = dev->mc_list; mc; mc = mc->next) { u32 crc = 
ether_crc(mc->dmi_addrlen, mc->dmi_addr); int bit = (crc >> 26) & 0x3f; hash[bit >> 5] |= 1 << (bit & 0x1f); } } ethoc_write(priv, ETH_HASH0, hash[0]); ethoc_write(priv, ETH_HASH1, hash[1]); } static int ethoc_change_mtu(struct net_device *dev, int new_mtu) { return -ENOSYS; } static void ethoc_tx_timeout(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 pending = ethoc_read(priv, INT_SOURCE); if (likely(pending)) ethoc_interrupt(dev->irq, dev); } static struct net_device_stats *ethoc_stats(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); return &priv->stats; } static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct ethoc_bd bd; unsigned int entry; void *dest; if (unlikely(skb->len > ETHOC_BUFSIZ)) { priv->stats.tx_errors++; return -EMSGSIZE; } entry = priv->cur_tx % priv->num_tx; spin_lock_irq(&priv->lock); priv->cur_tx++; ethoc_read_bd(priv, entry, &bd); if (unlikely(skb->len < ETHOC_ZLEN)) bd.stat |= TX_BD_PAD; else bd.stat &= ~TX_BD_PAD; dest = priv->membase + bd.addr; memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); bd.stat |= TX_BD_LEN(skb->len); ethoc_write_bd(priv, entry, &bd); bd.stat |= TX_BD_READY; ethoc_write_bd(priv, entry, &bd); if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { dev_dbg(&dev->dev, "stopping queue\n"); netif_stop_queue(dev); } dev->trans_start = jiffies; dev_kfree_skb(skb); spin_unlock_irq(&priv->lock); return NETDEV_TX_OK; } static const struct net_device_ops ethoc_netdev_ops = { .ndo_open = ethoc_open, .ndo_stop = ethoc_stop, .ndo_do_ioctl = ethoc_ioctl, .ndo_set_config = ethoc_config, .ndo_set_mac_address = ethoc_set_mac_address, .ndo_set_multicast_list = ethoc_set_multicast_list, .ndo_change_mtu = ethoc_change_mtu, .ndo_tx_timeout = ethoc_tx_timeout, .ndo_get_stats = ethoc_stats, .ndo_start_xmit = ethoc_start_xmit, }; /** * ethoc_probe() - initialize OpenCores ethernet MAC * pdev: platform device 
*/ static int ethoc_probe(struct platform_device *pdev) { struct net_device *netdev = NULL; struct resource *res = NULL; struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; unsigned int phy; int ret = 0; /* allocate networking device */ netdev = alloc_etherdev(sizeof(struct ethoc)); if (!netdev) { dev_err(&pdev->dev, "cannot allocate network device\n"); ret = -ENOMEM; goto out; } SET_NETDEV_DEV(netdev, &pdev->dev); platform_set_drvdata(pdev, netdev); /* obtain I/O memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory space\n"); ret = -ENXIO; goto free; } mmio = devm_request_mem_region(&pdev->dev, res->start, res->end - res->start + 1, res->name); if (!res) { dev_err(&pdev->dev, "cannot request I/O memory space\n"); ret = -ENXIO; goto free; } netdev->base_addr = mmio->start; /* obtain buffer memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(&pdev->dev, "cannot obtain memory space\n"); ret = -ENXIO; goto free; } mem = devm_request_mem_region(&pdev->dev, res->start, res->end - res->start + 1, res->name); if (!mem) { dev_err(&pdev->dev, "cannot request memory space\n"); ret = -ENXIO; goto free; } netdev->mem_start = mem->start; netdev->mem_end = mem->end; /* obtain device IRQ number */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain IRQ\n"); ret = -ENXIO; goto free; } netdev->irq = res->start; /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, mmio->end - mmio->start + 1); if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; goto error; } priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, mem->end - mem->start + 1); if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; goto error; } /* 
Allow the platform setup code to pass in a MAC address. */ if (pdev->dev.platform_data) { struct ethoc_platform_data *pdata = (struct ethoc_platform_data *)pdev->dev.platform_data; memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); priv->phy_id = pdata->phy_id; } /* Check that the given MAC address is valid. If it isn't, read the * current MAC from the controller. */ if (!is_valid_ether_addr(netdev->dev_addr)) ethoc_get_mac_address(netdev, netdev->dev_addr); /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ if (!is_valid_ether_addr(netdev->dev_addr)) random_ether_addr(netdev->dev_addr); ethoc_set_mac_address(netdev, netdev->dev_addr); /* register MII bus */ priv->mdio = mdiobus_alloc(); if (!priv->mdio) { ret = -ENOMEM; goto free; } priv->mdio->name = "ethoc-mdio"; snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d", priv->mdio->name, pdev->id); priv->mdio->read = ethoc_mdio_read; priv->mdio->write = ethoc_mdio_write; priv->mdio->reset = ethoc_mdio_reset; priv->mdio->priv = priv; priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!priv->mdio->irq) { ret = -ENOMEM; goto free_mdio; } for (phy = 0; phy < PHY_MAX_ADDR; phy++) priv->mdio->irq[phy] = PHY_POLL; ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); goto free_mdio; } ret = ethoc_mdio_probe(netdev); if (ret) { dev_err(&netdev->dev, "failed to probe MDIO bus\n"); goto error; } ether_setup(netdev); /* setup the net_device structure */ netdev->netdev_ops = &ethoc_netdev_ops; netdev->watchdog_timeo = ETHOC_TIMEOUT; netdev->features |= 0; /* setup NAPI */ memset(&priv->napi, 0, sizeof(priv->napi)); netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); spin_lock_init(&priv->rx_lock); spin_lock_init(&priv->lock); ret = register_netdev(netdev); if (ret < 0) { dev_err(&netdev->dev, "failed to register interface\n"); goto error; } goto out; error: mdiobus_unregister(priv->mdio); free_mdio: 
kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: free_netdev(netdev); out: return ret; } /** * ethoc_remove() - shutdown OpenCores ethernet MAC * @pdev: platform device */ static int ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); platform_set_drvdata(pdev, NULL); if (netdev) { phy_disconnect(priv->phy); priv->phy = NULL; if (priv->mdio) { mdiobus_unregister(priv->mdio); kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } unregister_netdev(netdev); free_netdev(netdev); } return 0; } #ifdef CONFIG_PM static int ethoc_suspend(struct platform_device *pdev, pm_message_t state) { return -ENOSYS; } static int ethoc_resume(struct platform_device *pdev) { return -ENOSYS; } #else # define ethoc_suspend NULL # define ethoc_resume NULL #endif static struct platform_driver ethoc_driver = { .probe = ethoc_probe, .remove = ethoc_remove, .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { .name = "ethoc", }, }; static int __init ethoc_init(void) { return platform_driver_register(&ethoc_driver); } static void __exit ethoc_exit(void) { platform_driver_unregister(&ethoc_driver); } module_init(ethoc_init); module_exit(ethoc_exit); MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); MODULE_LICENSE("GPL v2");