/* fs/xfs/xfs_log.h */
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

/*
 * Extract the fields of a log sequence number: the log cycle number is
 * kept in the high 32 bits of an xfs_lsn_t, the log block number in the
 * low 32 bits.
 */

#define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
#define BLOCK_LSN(lsn) ((uint)(lsn))

/* this is used in a spot where we might otherwise double-endian-flip */
#define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0])

#ifdef __KERNEL__
/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)

/*
 * Macros, structures, prototypes for interface to the log manager.
 */

/*
 * Flags to xfs_log_done()
 */
#define XFS_LOG_REL_PERM_RESERV	0x1

/*
 * Flags to xfs_log_reserve()
 *
 *	XFS_LOG_SLEEP:	 If space is not available, sleep (default)
 *	XFS_LOG_NOSLEEP: If space is not available, return error
 *	XFS_LOG_PERM_RESERV: Permanent reservation.  When writes are
 *		performed against this type of reservation, the reservation
 *		is not decreased.  Long running transactions should use this.
 */
#define XFS_LOG_SLEEP		0x0
#define XFS_LOG_NOSLEEP		0x1
#define XFS_LOG_PERM_RESERV	0x2
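
/*
 * Illustrative sketch (not part of the original header): a long-running
 * transaction would typically take a permanent reservation.  Assuming the
 * caller already has a mount point "mp", a reservation size "resv_bytes"
 * and a transaction type "tx_type" of its own, the call might look like:
 *
 *	struct xlog_ticket *ticket = NULL;
 *	int error;
 *
 *	error = xfs_log_reserve(mp, resv_bytes, 1, &ticket,
 *				XFS_TRANSACTION, XFS_LOG_PERM_RESERV,
 *				tx_type);
 *
 * Writes against such a ticket do not decrease the reservation; the ticket
 * is eventually released via xfs_log_done() with XFS_LOG_REL_PERM_RESERV.
 */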

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

#endif	/* __KERNEL__ */


/* Log Clients */
#define XFS_TRANSACTION		0x69
#define XFS_VOLUME		0x2
#define XFS_LOG			0xaa


/* Region types for iovec's i_type */
#define XLOG_REG_TYPE_BFORMAT		1
#define XLOG_REG_TYPE_BCHUNK		2
#define XLOG_REG_TYPE_EFI_FORMAT	3
#define XLOG_REG_TYPE_EFD_FORMAT	4
#define XLOG_REG_TYPE_IFORMAT		5
#define XLOG_REG_TYPE_ICORE		6
#define XLOG_REG_TYPE_IEXT		7
#define XLOG_REG_TYPE_IBROOT		8
#define XLOG_REG_TYPE_ILOCAL		9
#define XLOG_REG_TYPE_IATTR_EXT		10
#define XLOG_REG_TYPE_IATTR_BROOT	11
#define XLOG_REG_TYPE_IATTR_LOCAL	12
#define XLOG_REG_TYPE_QFORMAT		13
#define XLOG_REG_TYPE_DQUOT		14
#define XLOG_REG_TYPE_QUOTAOFF		15
#define XLOG_REG_TYPE_LRHEADER		16
#define XLOG_REG_TYPE_UNMOUNT		17
#define XLOG_REG_TYPE_COMMIT		18
#define XLOG_REG_TYPE_TRANSHDR		19
#define XLOG_REG_TYPE_MAX		19

typedef struct xfs_log_iovec {
	xfs_caddr_t	i_addr;		/* beginning address of region */
	int		i_len;		/* length in bytes of region */
	uint		i_type;		/* type of region */
} xfs_log_iovec_t;
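
/*
 * Illustrative sketch (not part of the original header): callers describe
 * the regions of a log write with an array of these iovecs and pass them
 * to xfs_log_write() along with a reserved ticket.  "my_hdr" stands in for
 * whatever structure the caller happens to be logging:
 *
 *	xfs_log_iovec_t reg[1];
 *	xfs_lsn_t start_lsn;
 *	int error;
 *
 *	reg[0].i_addr = (xfs_caddr_t)my_hdr;
 *	reg[0].i_len  = sizeof(*my_hdr);
 *	reg[0].i_type = XLOG_REG_TYPE_TRANSHDR;
 *	error = xfs_log_write(mp, reg, 1, ticket, &start_lsn);
 */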

/*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
 */
typedef struct xfs_log_callback {
	struct xfs_log_callback	*cb_next;
	void			(*cb_func)(void *, int);
	void			*cb_arg;
} xfs_log_callback_t;
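
/*
 * Illustrative sketch (not part of the original header): a callback is
 * attached to an in-core log buffer with xfs_log_notify() and is invoked
 * once that buffer has been written (the int argument is non-zero if the
 * write was aborted).  "my_done" and "my_item" are stand-ins for the
 * caller's own completion function and data:
 *
 *	static void my_done(void *arg, int aborted) { ... }
 *
 *	xfs_log_callback_t cb = {
 *		.cb_func = my_done,
 *		.cb_arg  = my_item,
 *	};
 *	error = xfs_log_notify(mp, iclog, &cb);
 */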


#ifdef __KERNEL__
/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;

xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       struct xlog_ticket *ticket,
		       struct xlog_in_core **iclog,
		       uint		flags);
int	  _xfs_log_force(struct xfs_mount *mp,
			 uint		flags,
			 int		*log_forced);
void	  xfs_log_force(struct xfs_mount	*mp,
			uint			flags);
int	  _xfs_log_force_lsn(struct xfs_mount *mp,
			     xfs_lsn_t		lsn,
			     uint		flags,
			     int		*log_forced);
void	  xfs_log_force_lsn(struct xfs_mount	*mp,
			    xfs_lsn_t		lsn,
			    uint		flags);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int		 	num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	  xfs_log_move_tail(struct xfs_mount	*mp,
			    xfs_lsn_t		tail_lsn);
int	  xfs_log_notify(struct xfs_mount	*mp,
			 struct xlog_in_core	*iclog,
			 xfs_log_callback_t	*callback_entry);
int	  xfs_log_release_iclog(struct xfs_mount *mp,
			 struct xlog_in_core	 *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  __uint8_t	   clientid,
			  uint		   flags,
			  uint		   t_type);
int	  xfs_log_write(struct xfs_mount *mp,
			xfs_log_iovec_t  region[],
			int		 nentries,
			struct xlog_ticket *ticket,
			xfs_lsn_t	 *start_lsn);
int	  xfs_log_unmount_write(struct xfs_mount *mp);
void      xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);
int	  xfs_log_need_covered(struct xfs_mount *mp);

void	  xlog_iodone(struct xfs_buf *);

struct xlog_ticket * xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

#endif


extern int xlog_debug;		/* set to 1 to enable real log */


#endif	/* __XFS_LOG_H__ */
/*
 * sonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
 *
 *    Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <dhd@debian.org>
 *
 */

/*
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */



/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 *  registers that "should" only need to be set once at boot, so that
 *  there is a non-reboot way to recover if something goes wrong.
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	if (sonic_debug > 2)
		printk("sonic_open: initializing sonic driver.\n");

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while (i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
		                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (!laddr) {
			while (i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev);

	netif_start_queue(dev);

	if (sonic_debug > 2)
		printk("sonic_open: Initialization done.\n");

	return 0;
}


/*
 * Close the SONIC device
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	if (sonic_debug > 2)
		printk("sonic_close\n");

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if (lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}

static void sonic_tx_timeout(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	/* Try to restart the adaptor. */
	sonic_init(dev);
	lp->stats.tx_errors++;
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * transmit packet
 *
 * Appends a new TD during transmission, thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may,
 *   set tx_skb[i]
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 *   stop the tx queue
 * The ISR interacts with this routine in various ways. It may,
 *   reset tx_skb[i]
 *   test the EOL and status flags of the TDs
 *   wake the tx queue
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 * Until some mutual exclusion is added, this code will not work with SMP. However,
 * MIPS Jazz machines and m68k Macs were all uni-processor machines.
 */

static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry = lp->next_tx;

	if (sonic_debug > 2)
		printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_BUSY;
	}

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	/*
	 * Must set tx_skb[entry] only after clearing status, and
	 * before clearing EOL and before stopping queue
	 */
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
				  sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		if (sonic_debug > 3)
			printk("%s: stopping queue\n", dev->name);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else {
		netif_start_queue(dev);
	}

	if (sonic_debug > 2)
		printk("sonic_send_packet: issuing Tx command\n");

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	return NETDEV_TX_OK;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;

	if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
		return IRQ_NONE;

	do {
		if (status & SONIC_INT_PKTRX) {
			if (sonic_debug > 2)
				printk("%s: packet rx\n", dev->name);
			sonic_rx(dev);	/* got packet(s) */
			SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* At this point, cur_tx is the index of a TD that is one of:
			 *   unallocated/freed                          (status set   & tx_skb[entry] clear)
			 *   allocated and sent                         (status set   & tx_skb[entry] set  )
			 *   allocated and not yet sent                 (status clear & tx_skb[entry] set  )
			 *   still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
			 */

			if (sonic_debug > 2)
				printk("%s: tx done\n", dev->name);

			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & 0x0001) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					lp->stats.tx_errors++;
					if (td_status & 0x0642)
						lp->stats.tx_aborted_errors++;
					if (td_status & 0x0180)
						lp->stats.tx_carrier_errors++;
					if (td_status & 0x0020)
						lp->stats.tx_window_errors++;
					if (td_status & 0x0004)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_kfree_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);  /* The ring is no longer full */
			lp->cur_tx = entry;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			if (sonic_debug > 1)
				printk("%s: rx fifo overrun\n", dev->name);
			lp->stats.rx_fifo_errors++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
		}
		if (status & SONIC_INT_RDE) {
			if (sonic_debug > 1)
				printk("%s: rx descriptors exhausted\n", dev->name);
			lp->stats.rx_dropped++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
		}
		if (status & SONIC_INT_RBAE) {
			if (sonic_debug > 1)
				printk("%s: rx buffer area exceeded\n", dev->name);
			lp->stats.rx_dropped++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
		}

		/* counter overruns; all counters are 16 bits wide */
		if (status & SONIC_INT_FAE) {
			lp->stats.rx_frame_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
		}
		if (status & SONIC_INT_CRC) {
			lp->stats.rx_crc_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
		}
		if (status & SONIC_INT_MP) {
			lp->stats.rx_missed_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
		}

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
				printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
				dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
		}

		/* load CAM done */
		if (status & SONIC_INT_LCD)
			SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
	} while ((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
	return IRQ_HANDLED;
}

/*
 * We have a good packet(s), pass it/them up the network stack.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	int entry = lp->cur_rx;

	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		struct sk_buff *used_skb;
		struct sk_buff *new_skb;
		dma_addr_t new_laddr;
		u16 bufadr_l;
		u16 bufadr_h;
		int pkt_len;

		status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
		if (status & SONIC_RCR_PRX) {
			/* Malloc up new buffer. */
			new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
			if (new_skb == NULL) {
				printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			/* provide 16 byte IP header alignment unless DMA requires otherwise */
			if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
				skb_reserve(new_skb, 2);

			new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
		                               SONIC_RBSIZE, DMA_FROM_DEVICE);
			if (!new_laddr) {
				dev_kfree_skb(new_skb);
				printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}

			/* now we have a new skb to replace it, pass the used one up the stack */
			dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
			used_skb = lp->rx_skb[entry];
			pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
			skb_trim(used_skb, pkt_len);
			used_skb->protocol = eth_type_trans(used_skb, dev);
			netif_rx(used_skb);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;

			/* and insert the new skb */
			lp->rx_laddr[entry] = new_laddr;
			lp->rx_skb[entry] = new_skb;

			bufadr_l = (unsigned long)new_laddr & 0xffff;
			bufadr_h = (unsigned long)new_laddr >> 16;
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
		} else {
			/* This should only happen if we enable accepting broken packets. */
			lp->stats.rx_errors++;
			if (status & SONIC_RCR_FAER)
				lp->stats.rx_frame_errors++;
			if (status & SONIC_RCR_CRCR)
				lp->stats.rx_crc_errors++;
		}
		if (status & SONIC_RCR_LPKT) {
			/*
			 * this was the last packet out of the current receive buffer
			 * give the buffer back to the SONIC
			 */
			lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
			if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
			SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
			if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
				if (sonic_debug > 2)
					printk("%s: rx buffer exhausted\n", dev->name);
				SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
			}
		} else
			printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
			     dev->name);
		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_LINK,
			sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
			sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
		lp->eol_rx = entry;
		lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
	}
	/*
	 * If any worth-while packets have been received, netif_rx()
	 * has done a mark_bh(NET_BH) for us and will work on them
	 * when we get to the bottom-half routine.
	 */
}


/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}


/*
 * Set or clear the multicast filter for this adaptor.
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;
		} else {
			if (sonic_debug > 2)
				printk("sonic_multicast_list: mc_count %d\n",
				       netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);  /* always enable our own address */
			i = 1;
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			/* issue Load CAM command */
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
		}
	}

	if (sonic_debug > 2)
		printk("sonic_multicast_list: setting RCR=%x\n", rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}


/*
 * Initialize the SONIC ethernet controller.
 */
static int sonic_init(struct net_device *dev)
{
	unsigned int cmd;
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);

	/*
	 * initialize the receive resource area
	 */
	if (sonic_debug > 2)
		printk("sonic_init: initialize receive resource area\n");

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* initialize all RRA registers */
	lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
					SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
	lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
					SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;

	SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
	SONIC_WRITE(SONIC_REA, lp->rra_end);
	SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
	SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	if (sonic_debug > 3)
		printk("sonic_init: issuing RRRA command\n");

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	i = 0;
	while (i++ < 100) {
		if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
			break;
	}

	if (sonic_debug > 2)
		printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);

	/*
	 * Initialize the receive descriptors so that they
 * become a circular linked list, i.e. let the last
	 * descriptor point to the first again.
	 */
	if (sonic_debug > 2)
		printk("sonic_init: initialize receive descriptors\n");
	for (i = 0; i < SONIC_NUM_RDS; i++) {