/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
	Written 1992-94 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


  This is the chip-specific code for many 8390-based ethernet adaptors.
  This is not a complete driver, it must be combined with board-specific
  code such as ne.c, wd.c, 3c503.c, etc.

  Seeing how at least eight drivers use this code (not counting the
  PCMCIA ones), it is easy to break some card with what seems like a
  simple, innocent change. Please contact me or Donald if you think
  you have found something that needs changing. -- PG


  Changelog:

  Paul Gortmaker	: remove set_bit lock, other cleanups.
  Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
			  ei_block_input() for eth_io_copy_and_sum().
  Paul Gortmaker	: exchange static int ei_pingpong for a #define,
			  also add better Tx error handling.
  Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
  Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
  Paul Gortmaker	: tweak ANK's above multicast changes a bit.
  Paul Gortmaker	: update packet statistics for v2.1.x
  Alan Cox		: support arbitrary stupid port mappings on the
  			  68K Macintosh. Support >16bit I/O spaces
  Paul Gortmaker	: add kmod support for auto-loading of the 8390
			  module by all drivers that require it.
  Alan Cox		: Spinlocking work, added 'BUG_83C690'
  Paul Gortmaker	: Separate out Tx timeout code from Tx path.
  Paul Gortmaker	: Remove old unused single Tx buffer code.
  Hayato Fujiwara	: Add m32r support.
  Paul Gortmaker	: use skb_padto() instead of stack scratch area

  Sources:
  The National Semiconductor LAN Databook, and the 3Com 3c503 databook.

  */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/crc32.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define NS8390_CORE
#include "8390.h"

#define BUG_83C690
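
/* When BUG_83C690 is defined we skip the multicast-filter read-back
   verification in do_set_multicast_list(): the MC registers on the
   SMC 83C690 appear to be write-only (see the Bug Alert comment there). */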

/* These are the operational function interfaces to board-specific
   routines.
	void reset_8390(struct net_device *dev)
		Resets the board associated with DEV, including a hardware reset of
		the 8390.  This is only called when there is a transmit timeout, and
		it is always followed by 8390_init().
	void block_output(struct net_device *dev, int count, const unsigned char *buf,
					  int start_page)
		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
		"page" value uses the 8390's 256-byte pages.
	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
		Read the 4 byte, page aligned 8390 header. *If* there is a
		subsequent read, it will be of the rest of the packet.
	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
		Read COUNT bytes from the packet buffer into the skb data area. Start
		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
		follow the read of the 8390 header.
*/
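
/* A board driver hooks these up before the device is opened. A minimal
   sketch (hypothetical board code, assuming the usual ei_status accessor
   from 8390.h):

	ei_status.reset_8390   = my_reset_8390;
	ei_status.block_output = my_block_output;
	ei_status.block_input  = my_block_input;
	ei_status.get_8390_hdr = my_get_8390_hdr;
*/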
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)

/* use 0 for production, 1 for verification, >=2 for debug */
#ifndef ei_debug
int ei_debug = 1;
#endif

/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_tx_timeout(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

/* Routines generic to NS8390-based boards. */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
								int start_page);
static void set_multicast_list(struct net_device *dev);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);

/*
 *	SMP and the 8390 setup.
 *
 *	The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
 *	a page register that controls bank and packet buffer access. We guard
 *	this with ei_local->page_lock. Nobody should assume or set the page other
 *	than zero when the lock is not held. Lock holders must restore page 0
 *	before unlocking. Even pure readers must take the lock so they can
 *	rely on the chip being in page 0.
 *
 *	To make life difficult the chip can also be very slow. We therefore can't
 *	just use spinlocks. For the longer lockups we disable the irq the device
 *	sits on and hold the lock. We must hold the lock because there is a dual
 *	processor case other than interrupts (get stats/set multicast list in
 *	parallel with each other and transmit).
 *
 *	Note: in theory we can just disable the irq on the card _but_ there is
 *	a latency on SMP irq delivery. So we can easily go "disable irq",
 *	"sync irqs", enter lock, take the queued irq. So we waddle instead of
 *	flying.
 *
 *	Finally by special arrangement for the purpose of being generally
 *	annoying the transmit function is called bh atomic. That places
 *	restrictions on the user context callers as disable_irq won't save
 *	them.
 *
 *	Additional explanation of problems with locking by Alan Cox:
 *
 *	"The author (me) didn't use spin_lock_irqsave because the slowness of the
 *	card means that approach caused horrible problems like losing serial data
 *	at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
 *	chips with FPGA front ends.
 *
 *	Ok the logic behind the 8390 is very simple:
 *
 *	Things to know
 *		- IRQ delivery is asynchronous to the PCI bus
 *		- Blocking the local CPU IRQ via spin locks was too slow
 *		- The chip has register windows needing locking work
 *
 *	So the path was once (I say once as people appear to have changed it
 *	in the mean time and it now looks rather bogus if the changes to use
 *	disable_irq_nosync_irqsave are disabling the local IRQ)
 *
 *
 *		Take the page lock
 *		Mask the IRQ on chip
 *		Disable the IRQ (but not mask locally- someone seems to have
 *			broken this with the lock validator stuff)
 *			[This must be _nosync as the page lock may otherwise
 *				deadlock us]
 *		Drop the page lock and turn IRQs back on
 *
 *		At this point an existing IRQ may still be running but we can't
 *		get a new one
 *
 *		Take the lock (so we know the IRQ has terminated) but don't mask
 *	the IRQs on the processor
 *		Set irqlock [for debug]
 *
 *		Transmit (slow as ****)
 *
 *		re-enable the IRQ
 *
 *
 *	We have to use disable_irq because otherwise you will get delayed
 *	interrupts on the APIC bus deadlocking the transmit path.
 *
 *	Quite hairy but the chip simply wasn't designed for SMP and you can't
 *	even ACK an interrupt without risking corrupting other parallel
 *	activities on the chip." [lkml, 25 Jul 2007]
 */
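
/*
 *	In this file the sequence above is what ei_start_xmit() below does:
 *	spin_lock_irqsave() and write 0 to EN0_IMR, unlock, then
 *	disable_irq_nosync_lockdep_irqsave(), spin_lock(), irqlock = 1,
 *	the slow ei_block_output()/NS8390_trigger_send() phase, and then
 *	the same steps undone in reverse order.
 */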



/**
 * ei_open - Open/initialize the board.
 * @dev: network device to initialize
 *
 * This routine goes all-out, setting everything
 * up anew at each open, even though many of these registers should only
 * need to be set once at boot.
 */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);

	/* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
	    wrapper that does e.g. media check & then calls ei_tx_timeout. */
	if (dev->tx_timeout == NULL)
		 dev->tx_timeout = ei_tx_timeout;
	if (dev->watchdog_timeo <= 0)
		 dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 *	Grab the page lock so we own the register set, then call
	 *	the init function.
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);
	/* Set the flag before we drop the lock. That way the IRQ arrives
	   after it's set and we get no silly warnings */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;
	return 0;
}

/**
 * ei_close - shut down network device
 * @dev: network device to close
 *
 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
 */
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	unsigned long flags;

	/*
	 *	Hold the page lock during close
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}

/**
 * ei_tx_timeout - handle transmit time out condition
 * @dev: network device which has apparently fallen asleep
 *
 * Called by kernel when device never acknowledges a transmit has
 * completed (or failed) - i.e. never posted a Tx related interrupt.
 */

static void ei_tx_timeout(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev->trans_start;
	unsigned long flags;

	dev->stats.tx_errors++;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
		(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets)
	{
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}

/**
 * ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Sends a packet to an 8390 network device.
 */

static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter, not disabling irqs, if we don't lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 *	Slow phase with lock held.
	 */

	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */
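	/* Layout: tx1 occupies pages [tx_start_page, tx_start_page +
	   TX_PAGES/2) and tx2 the following TX_PAGES/2 pages, matching
	   the output_page choices below. */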

	if (ei_local->tx1 == 0)
	{
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if (ei_debug  &&  ei_local->tx2 > 0)
			printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
				dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
	}
	else if (ei_local->tx2 == 0)
	{
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if (ei_debug  &&  ei_local->tx1 > 0)
			printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
				dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
	}
	else
	{	/* We should never get here. */
		if (ei_debug)
			printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
				dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return 1;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (! ei_local->txing)
	{
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		dev->trans_start = jiffies;
		if (output_page == ei_local->tx_start_page)
		{
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		}
		else
		{
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	}
	else ei_local->txqueue++;

	if (ei_local->tx1  &&  ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);

	dev_kfree_skb (skb);
	dev->stats.tx_bytes += send_length;

	return 0;
}

/**
 * ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Handle the ether interface interrupts. We pull packets from
 * the 8390 via the card specific functions and fire them at the networking
 * stack. We also handle transmit completions and wake the transmit path if
 * necessary. We also update the counters and do other housekeeping as
 * needed.
 */

static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 *	Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock)
	{
#if 1 /* This might just be an interrupt for a PCI device sharing this line */
		/* The "irqlock" check is only for testing. */
		printk(ei_local->irqlock
			   ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
			   : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
			   dev->name, ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
#endif
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
			   ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0
		   && ++nr_serviced < MAX_SERVICE)
	{
		if (!netif_running(dev)) {
			printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
			/* rmk - acknowledge the interrupts */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
		{
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS)
		{
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
		{
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
		}

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && ei_debug)
	{
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE)
		{
			/* 0xFF is valid for a card removal */
			if(interrupts!=0xFF)
				printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
				   dev->name, interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void __ei_poll(struct net_device *dev)
{
	disable_irq_lockdep(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq_lockdep(dev->irq);
}
#endif

/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened. Most likely excess collisions (which
 * is a fairly normal condition). If the error is one where the Tx will
 * have been aborted, we try and send another one right away, instead of
 * letting the failed packet sit and collect dust in the Tx buffer. This
 * is a much better solution as it avoids kernel based Tx timeouts, and
 * an unnecessary card reset.
 *
 * Called with lock held.
 */

static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
	if (txsr & ENTSR_ABT)
		printk("excess-collisions ");
	if (txsr & ENTSR_ND)
		printk("non-deferral ");
	if (txsr & ENTSR_CRS)
		printk("lost-carrier ");
	if (txsr & ENTSR_FU)
		printk("FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		printk("lost-heartbeat ");
	printk("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else
	{
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
	}
}

/**
 * ei_tx_intr - transmit interrupt handler
 * @dev: network device for which tx intr is handled
 *
 * We have finished a transmit: check for errors and then trigger the next
 * packet to be sent. Called with lock held.
 */

static void ei_tx_intr(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	int status = ei_inb(e8390_base + EN0_TSR);

	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
	 * There are two Tx buffers, see which one finished, and trigger
	 * the send of another one if it exists.
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0)
	{
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
				ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0)
		{
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + TX_PAGES/2);
			dev->trans_start = jiffies;
			ei_local->tx2 = -1;
			ei_local->lasttx = 2;
		}
		else
		{
			ei_local->lasttx = 20;
			ei_local->txing = 0;
		}
	}
	else if (ei_local->tx2 < 0)
	{
		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
			printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
				ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0)
		{
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			dev->trans_start = jiffies;
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		}
		else
		{
			ei_local->lasttx = 10;
			ei_local->txing = 0;
		}
	}
//	else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
//			dev->name, ei_local->lasttx);

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		dev->stats.collisions++;
	if (status & ENTSR_PTX)
		dev->stats.tx_packets++;
	else
	{
		dev->stats.tx_errors++;
		if (status & ENTSR_ABT)
		{
			dev->stats.tx_aborted_errors++;
			dev->stats.collisions += 16;
		}
		if (status & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (status & ENTSR_FU)
			dev->stats.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
	netif_wake_queue(dev);
}

/**
 * ei_receive - receive some packets
 * @dev: network device with which receive will be run
 *
 * We have a good packet(s), get it/them out of the buffers.
 * Called with lock held.
 */

static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10)
	{
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring.  Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous check, iff we never get this
		   message. (There is at least one clone claimed to have a
		   problem.)

		   Keep quiet if it looks like a card removal. One problem here
		   is that some clones crash in roughly the same way.
		 */
		if (ei_debug > 0  &&  this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
			printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
				   dev->name, this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;				/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
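		/* The 8390 header is 4 bytes, so pkt_len+4 is the frame's total
		   footprint in the ring; each further 256 bytes of it occupies
		   one more 256-byte page beyond this_frame. */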

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written.  This happened a lot during testing! This code should be
		   cleaned up someday. */
		if (rx_frame.next != next_frame
			&& rx_frame.next != next_frame + 1
			&& rx_frame.next != next_frame - num_rx_pages
			&& rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60  ||  pkt_len > 1518)
		{
			if (ei_debug)
				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
					   dev->name, rx_frame.count, rx_frame.status,
					   rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		}
		else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
		{
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL)
			{
				if (ei_debug > 1)
					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
						   dev->name, pkt_len);
				dev->stats.rx_dropped++;
				break;
			}
			else
			{
				skb_reserve(skb,2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		}
		else
		{
			if (ei_debug)
				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
					   dev->name, rx_frame.status, rx_frame.next,
					   rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			printk("%s: next frame inconsistency, %#2x\n", dev->name,
				   next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
	return;
}

/**
 * ei_rx_overrun - handle receiver overrun
 * @dev: network device which threw exception
 *
 * We have a receiver overrun: we have to kick the 8390 to get it started
 * again. Problem is that you have to kick it exactly as NS prescribes in
 * the updated datasheets, or "the NIC may act in an unpredictable manner."
 * This includes causing "the NIC to defer indefinitely when it is stopped
 * on a busy network."  Ugh.
 * Called with lock held. Don't call this with the interrupts off or your
 * computer will hate you - it takes 10ms or so.
 */

static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
	dev->stats.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
	 * We wait at least 10ms.
	 */

	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not. According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */

	if (was_txing)
	{
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}

/*
 *	Collect the stats. This is called unlocked and from several contexts.
 */

static struct net_device_stats *get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock,flags);
	/* Read the counter registers, assuming we are in page 0. */
	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors   += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}

/*
 * Form the 64 bit 8390 multicast table from the linked list of addresses
 * associated with this dev structure.
 */

static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct dev_mc_list *dmi;

	for (dmi=dev->mc_list; dmi; dmi=dmi->next)
	{
		u32 crc;
		if (dmi->dmi_addrlen != ETH_ALEN)
		{
			printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
			continue;
		}
		crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table.
		 */
		bits[crc>>29] |= (1<<((crc>>26)&7));
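		/* Worked example: crc = 0xD4000000 gives crc>>29 = 6 (filter
		   byte 6) and (crc>>26)&7 = 5 (bit 5 within that byte). */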
	}
}

/**
 * do_set_multicast_list - set/clear multicast filter
 * @dev: net device for which multicast filter is adjusted
 *
 *	Set or clear the multicast filter for this adaptor. May be called
 *	from a BH in 2.1.x. Must be called with lock held.
 */

static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
	{
		memset(ei_local->mcfilter, 0, 8);
		if (dev->mc_list)
			make_mc_bits(ei_local->mcfilter, dev);
	}
	else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */

	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for(i = 0; i < 8; i++)
	{
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
			printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	if (dev->flags & IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || dev->mc_list)
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}

/*
 *	Called without lock held. This is invoked from user context and may
 *	be parallel to just about everything else. Its also fairly quick and
 *	not called too often. Must protect against both bh and irq users
 */

static void set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}

/**
 * ethdev_setup - init rest of 8390 device struct
 * @dev: network device structure to init
 *
 * Initialize the rest of the 8390 device structure.  Do NOT __init
 * this, as it is used by 8390 based modular drivers too.
 */

static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	if (ei_debug > 1)
		printk(version);

	dev->hard_start_xmit = &ei_start_xmit;
	dev->get_stats	= get_stats;
	dev->set_multicast_list = &set_multicast_list;

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);
}

/**
 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
 * @size: extra bytes to allocate
 *
 * Allocate 8390-specific net_device.
 */
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
				ethdev_setup);
}
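
/* Board drivers normally reach this through an alloc_ei_netdev() wrapper
   (see 8390.h). A hypothetical call:

	struct net_device *dev = alloc_ei_netdev(sizeof(struct my_priv));
	if (!dev)
		return -ENOMEM;
*/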




/* This page of functions should be 8390 generic */
/* Follow National Semi's recommendations for initializing the "NIC". */

/**
 * NS8390_init - initialize 8390 hardware
 * @dev: network device to initialize
 * @startp: boolean.  non-zero value to initiate chip processing
 *
 *	Must be called with lock held.
 */

static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	int i;
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	if (sizeof(struct e8390_pkt_hdr) != 4)
		panic("8390.c: header struct mispacked\n");
	/* Follow National Semi's recommendations for initing the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital! */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3F, NS says 0x26 */
	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00,  e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */

	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for(i = 0; i < 6; i++)
	{
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
			printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	netif_start_queue(dev);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp)
	{
		ei_outb_p(0xff,  e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}

/* Trigger a transmit start, assuming the length is valid.
   Always called with the page lock held */

static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
								int start_page)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local __maybe_unused = (struct ei_device *) netdev_priv(dev);

	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
	{
		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
			dev->name);
		return;
	}
	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	ei_outb_p(start_page, e8390_base + EN0_TPSR);
	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}
hl opt">[2]); } static int ql_get_nvram_params(struct ql3_adapter *qdev) { u16 *pEEPROMData; u16 checksum = 0; u32 index; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); pEEPROMData = (u16 *) & qdev->nvram_data; qdev->eeprom_cmd_data = 0; if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 10)) { printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", __func__); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } for (index = 0; index < EEPROM_SIZE; index++) { eeprom_readword(qdev, index, pEEPROMData); checksum += *pEEPROMData; pEEPROMData++; } ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); if (checksum != 0) { printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", qdev->ndev->name, checksum); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return checksum; } static const u32 PHYAddr[2] = { PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS }; static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 temp; int count = 1000; while (count) { temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); if (!(temp & MAC_MII_STATUS_BSY)) return 0; udelay(10); count--; } return -1; } static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 scanControl; if (qdev->numPorts > 1) { /* Auto scan will cycle through multiple ports */ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; } else { scanControl = MAC_MII_CONTROL_SC; } /* * Scan register 1 of PHY/PETBI, * Set up to scan both devices * The autoscan starts from the first register, completes * the last one before rolling over to the first */ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, PHYAddr[0] | MII_SCAN_REGISTER); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (scanControl) | ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); } static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) { u8 ret; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; /* See if scan mode is enabled before we turn it off */ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { /* Scan is enabled */ ret = 1; } else { /* Scan is disabled */ ret = 0; } /* * When disabling scan mode you must first change the MII register * address */ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, PHYAddr[0] | MII_SCAN_REGISTER); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | MAC_MII_CONTROL_RC) << 16)); return ret; } static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u8 scanWasEnabled; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, phyAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); /* Wait for write to complete 9/10/04 SJP */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return 
-1; } if (scanWasEnabled) ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 * value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u8 scanWasEnabled; u32 temp; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, phyAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16)); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free after issuing command.\n", qdev->ndev->name); return -1; } temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); *value = (u16) temp; if (scanWasEnabled) ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); /* Wait for write to complete. */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) { u32 temp; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16)); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); *value = (u16) temp; ql_mii_enable_scan_mode(qdev); return 0; } static void ql_petbi_reset(struct ql3_adapter *qdev) { ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); } static void ql_petbi_start_neg(struct ql3_adapter *qdev) { u16 reg; /* Enable Auto-negotiation sense */ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg); reg |= PETBI_TBI_AUTO_SENSE; ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); ql_mii_write_reg(qdev, PETBI_NEG_ADVER, PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 
PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); } static void ql_petbi_reset_ex(struct ql3_adapter *qdev) { ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, PHYAddr[qdev->mac_index]); } static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) { u16 reg; /* Enable Auto-negotiation sense */ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, PHYAddr[qdev->mac_index]); reg |= PETBI_TBI_AUTO_SENSE; ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, PHYAddr[qdev->mac_index]); } static void ql_petbi_init(struct ql3_adapter *qdev) { ql_petbi_reset(qdev); ql_petbi_start_neg(qdev); } static void ql_petbi_init_ex(struct ql3_adapter *qdev) { ql_petbi_reset_ex(qdev); ql_petbi_start_neg_ex(qdev); } static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) { u16 reg; if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0) return 0; return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; } static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) { printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name); /* power down device bit 11 = 1 */ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); /* enable diagnostic mode bit 2 = 1 */ ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); /* 1000MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); /* 1000MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); /* 100MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); /* 100MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); /* 10MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); /* 10MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); /* point to hidden reg 0x2806 */ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); /* Write new PHYAD w/bit 5 set */ ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); /* * Disable diagnostic mode bit 2 = 0 * Power up device bit 11 = 0 * Link up (on) and activity (blink) */ ql_mii_write_reg(qdev, 0x12, 0x840a); ql_mii_write_reg(qdev, 0x00, 0x1140); ql_mii_write_reg(qdev, 0x1c, 0xfaf0); } static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, u16 phyIdReg0, u16 phyIdReg1) { PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; u32 oui; u16 model; int i; if (phyIdReg0 == 0xffff) { return result; } if (phyIdReg1 == 0xffff) { return result; } /* oui is split between two registers */ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; /* Scan table for this PHY */ for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) { result = PHY_DEVICES[i].phyDevice; printk(KERN_INFO "%s: Phy: %s\n", qdev->ndev->name, PHY_DEVICES[i].name); break; } } return result; } static int ql_phy_get_speed(struct ql3_adapter *qdev) { u16 reg; switch(qdev->phyType) { case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) return 0; reg = (reg >> 8) & 3; break; } default: if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) return 0; reg = (((reg & 0x18) >> 3) & 3); } switch(reg) { case 2: return SPEED_1000; case 1: 
return SPEED_100; case 0: return SPEED_10; default: return -1; } } static int ql_is_full_dup(struct ql3_adapter *qdev) { u16 reg; switch(qdev->phyType) { case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, &reg)) return 0; return ((reg & 0x0080) && (reg & 0x1000)) != 0; } case PHY_VITESSE_VSC8211: default: { if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) return 0; return (reg & PHY_AUX_DUPLEX_STAT) != 0; } } } static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) { u16 reg; if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0) return 0; return (reg & PHY_NEG_PAUSE) != 0; } static int PHY_Setup(struct ql3_adapter *qdev) { u16 reg1; u16 reg2; bool agereAddrChangeNeeded = false; u32 miiAddr = 0; int err; /* Determine the PHY we are using by reading the ID's */ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", qdev->ndev->name); return err; } err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", qdev->ndev->name); return err; } /* Check if we have a Agere PHY */ if ((reg1 == 0xffff) || (reg2 == 0xffff)) { /* Determine which MII address we should be using determined by the index of the card */ if (qdev->mac_index == 0) { miiAddr = MII_AGERE_ADDR_1; } else { miiAddr = MII_AGERE_ADDR_2; } err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", qdev->ndev->name); return err; } err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", qdev->ndev->name); return err; } /* We need to remember to initialize the Agere PHY */ agereAddrChangeNeeded = true; } /* Determine the particular PHY we have on board to apply PHY specific initializations */ qdev->phyType = getPhyType(qdev, reg1, reg2); if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { /* need this here so address gets changed */ phyAgereSpecificInit(qdev, miiAddr); } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); return -EIO; } return 0; } /* * Caller holds hw_lock. */ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); else value = (MAC_CONFIG_REG_PE << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); else value = (MAC_CONFIG_REG_SR << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. 
*/ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); else value = (MAC_CONFIG_REG_GM << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); else value = (MAC_CONFIG_REG_FD << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); else value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static int ql_is_fiber(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_SM0; break; case 1: bitToCheck = PORT_STATUS_SM1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); return (temp & bitToCheck) != 0; } static int ql_is_auto_cfg(struct ql3_adapter *qdev) { u16 reg; ql_mii_read_reg(qdev, 0x00, &reg); return (reg & 0x1000) != 0; } /* * Caller holds hw_lock. */ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_AC0; break; case 1: bitToCheck = PORT_STATUS_AC1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); if (temp & bitToCheck) { if (netif_msg_link(qdev)) printk(KERN_INFO PFX "%s: Auto-Negotiate complete.\n", qdev->ndev->name); return 1; } else { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Auto-Negotiate incomplete.\n", qdev->ndev->name); return 0; } } /* * ql_is_neg_pause() returns 1 if pause was negotiated to be on */ static int ql_is_neg_pause(struct ql3_adapter *qdev) { if (ql_is_fiber(qdev)) return ql_is_petbi_neg_pause(qdev); else return ql_is_phy_neg_pause(qdev); } static int ql_auto_neg_error(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_AE0; break; case 1: bitToCheck = PORT_STATUS_AE1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); return (temp & bitToCheck) != 0; } static u32 ql_get_link_speed(struct ql3_adapter *qdev) { if (ql_is_fiber(qdev)) return SPEED_1000; else return ql_phy_get_speed(qdev); } static int ql_is_link_full_dup(struct ql3_adapter *qdev) { if (ql_is_fiber(qdev)) return 1; else return ql_is_full_dup(qdev); } /* * Caller holds hw_lock. 
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp = ql_read_common_reg(qdev,
				  &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0, and they need to be
	   reinterpreted with a default value. */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;
		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;
		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);
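	/* With the advertisements programmed, set AUTO_NEG and RESTART_NEG
	   in the PHY control register so the link renegotiates using the
	   new parameters. */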
	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
		       qdev->ndev->name);
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev) ? "full" : "half");
	} else {	/* Remote error detected */
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		/* Restart timer on one second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",
			       qdev->ndev->name);
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX
				       "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on one second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}
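/*
 * Scan mode (enabled below) has the MII management block poll the link
 * device on its own; presumably this is what lets later link-state
 * queries be satisfied from the port status register rather than through
 * explicit MII reads.
 */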
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg,
				   0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE |
			    SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}
static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed netdev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					       qdev->ndev->name, err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}
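/*
 * Each receive buffer queue entry holds QL_ADDR_ELE_PER_BUFQ_ENTRY
 * (eight) buffer addresses, so the small-buffer updater above and the
 * large-buffer updater below advance their producer index one entry per
 * eight released buffers, with the release counters consumed eight at a
 * time.
 */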
/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short, but it was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n",
		       mac_rsp->transaction_id);
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}
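/*
 * The two helpers below advance the receive-side consumer state: each
 * call takes the next buffer in the wrap-around ring and counts it as
 * released, so the producer-index updaters above can eventually hand
 * the buffer back to the hardware.
 */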
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				"TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 3032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default: {
			u32 *tmp = (u32 *)net_rsp;
			printk(KERN_ERR PFX
			       "%s: Hit default case, not handled!\n"
			       "	dropping the packet, opcode = %x.\n",
			       ndev->name, net_rsp->opcode);
			printk(KERN_ERR PFX
			       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
			       (unsigned long int)tmp[0],
			       (unsigned long int)tmp[1],
			       (unsigned long int)tmp[2],
			       (unsigned long int)tmp[3]);
		}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}

static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	struct net_device *ndev = qdev->ndev;
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__netif_rx_complete(ndev, napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}
	return tx_cleaned + rx_cleaned;
}

static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	port_regs = qdev->mem_map_registers;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var = ql_read_page0_reg_l(qdev,
						  &port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) {
			__netif_rx_schedule(ndev, &qdev->napi);
		}
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}
/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev,
			    unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:	return 6;
	case 5:	return 7;
	case 6:	return 8;
	case 7:	return 10;
	case 8:	return 11;
	case 9:	return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}
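/*
 * Worked example for the table above: with 3 frags there are 4 buffers
 * (skb->data + 3 frags).  The IOCB holds 2 data entries plus 1
 * continuation entry pointing at an OAL, and the OAL holds the other 2
 * frags, for 5 segments total.  Each additional group of 4 frags costs
 * one more continuation entry (an OAL is 5 entries: 4 data + 1 chain),
 * which is why the table jumps by 2 at frags = 3, 7, 11 and 15.
 */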
static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
			OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
			OB_3032MAC_IOCB_REQ_IC;
	}
}

/*
 * Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
	} else {
		oal = tx_cb->oal;
		for (completed_segs = 0;
		     completed_segs < frag_cnt;
		     completed_segs++, seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
					       qdev->ndev->name, err);
					goto map_error;
				}

				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len = cpu_to_le32(sizeof(struct oal) |
							     OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map = pci_map_page(qdev->pdev, frag->page,
					   frag->page_offset, frag->size,
					   PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
	}

	return NETDEV_TX_OK;

map_error:
	/*
	 * A PCI mapping failed, so now we must back out: traverse the
	 * OALs and the pages that have already been mapped, and unmap
	 * them to clean up properly.
	 */
	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;

		if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
		    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
		    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
					 pci_unmap_len(&tx_cb->map[seg], maplen),
					 PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
			       pci_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		return NETDEV_TX_BUSY;
	}

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	if ((tx_cb->seg_count = ql_get_seg_count(qdev,
						 (skb_shinfo(skb)->nr_frags))) == -1) {
		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX "%s: Could not map the segments!\n",
		       __func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	wmb();
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}
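/*
 * The request and response rings below apparently must be naturally
 * aligned to their own size: the (size - 1) checks on the low DMA
 * address bits reject any allocation that is not so aligned.
 */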
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		printk(KERN_ERR PFX "%s: reqQ failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		printk(KERN_ERR PFX "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}

static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}

static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf =
	    kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
		    GFP_KERNEL);
	if (qdev->lrg_buf == NULL) {
		printk(KERN_ERR PFX
		       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,
				 &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: lBufQ failed\n", qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_q_alloc_size,
				 &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Small Buffer Queue allocation failed.\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}

static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	kfree(qdev->lrg_buf);
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}
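/*
 * Small receive buffers come from one contiguous allocation that is
 * carved into NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY fixed-size
 * pieces of QL_SMALL_BUFFER_SIZE bytes each; the queue entries filled
 * in below simply point at successive offsets within that block.
 */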
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and use it for all of
	   the small buffers. */
	qdev->small_buf_total_size =
	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
	     QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_total_size,
				 &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Failed to get small buffer memory.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
		    cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
		    cpu_to_le32(qdev->small_buf_phy_addr_low +
				(i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}

static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}

static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(lrg_buf_cb, mapaddr),
					 pci_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}

static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}

static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	dma_addr_t map;
	int err;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX
			       "%s: large buff alloc failed, "
			       "for %d bytes at index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb = &qdev->lrg_buf[i];
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
			lrg_buf_cb->index = i;
			lrg_buf_cb->skb = skb;
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				ql_free_large_buffers(qdev);
				return -ENOMEM;
			}

			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}

static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		kfree(tx_cb->oal);
		tx_cb->oal = NULL;
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {

		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -1;
	}
	return 0;
}
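/*
 * ql_alloc_mem_resources() below drives all of the allocators above:
 * it sizes the receive buffers from the MTU, allocates the shadow
 * register page, then the request/response rings, buffer queues, small
 * and large buffers and the transmit free list, unwinding in reverse
 * order if any step fails.
 */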
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size.  Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
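	/*
	 * Each receive buffer must hold the MTU plus a VLAN Ethernet
	 * header, the VLAN tag and the space reserved up front for the
	 * 3022 header copy; max_frame_size excludes that reserved space
	 * but adds the Ethernet CRC.
	 */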
	qdev->num_large_buffers =
	    qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;

err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}

static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}

static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}

static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
		(void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring the PHY out of reset */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);
	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *)(qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((__le16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQLength,
			   qdev->num_lbufq_entries);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value =
		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
		     QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value =
		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
		     PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}
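/*
 * Reset handling below: a soft reset (ISP_CONTROL_SR) is requested and
 * polled for up to five seconds; if it never clears, a force soft reset
 * (ISP_CONTROL_FSR) is issued and polled the same way.  The network
 * reset interrupt bit (ISP_CONTROL_RI) is cleared separately since it
 * can remain set after the reset itself has completed.
 */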
/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait up to 5 seconds for reset to complete. */
	printk(KERN_DEBUG PFX
	       "%s: Wait up to 5 seconds for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0) {
				break;
			}
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
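/*
 * ql_set_mac_info() decodes which ISP function this PCI function is
 * (network function 0 or 1) from ispControlStatus and derives the MAC
 * index, outbound opcode, PHY address and the optical-vs-copper flag
 * (from the port status media bits) accordingly.
 */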
"PCI-X" : "PCI")); printk(KERN_INFO PFX "mem IO base address adjusted = 0x%p\n", qdev->mem_map_registers); printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq); if (netif_msg_probe(qdev)) printk(KERN_INFO PFX "%s: MAC address %s\n", ndev->name, print_mac(mac, ndev->dev_addr)); } static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) { struct net_device *ndev = qdev->ndev; int retval = 0; netif_stop_queue(ndev); netif_carrier_off(ndev); clear_bit(QL_ADAPTER_UP,&qdev->flags); clear_bit(QL_LINK_MASTER,&qdev->flags); ql_disable_interrupts(qdev); free_irq(qdev->pdev->irq, ndev); if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { printk(KERN_INFO PFX "%s: calling pci_disable_msi().\n", qdev->ndev->name); clear_bit(QL_MSI_ENABLED,&qdev->flags); pci_disable_msi(qdev->pdev); } del_timer_sync(&qdev->adapter_timer); napi_disable(&qdev->napi); if (do_reset) { int soft_reset; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_wait_for_drvr_lock(qdev)) { if ((soft_reset = ql_adapter_reset(qdev))) { printk(KERN_ERR PFX "%s: ql_adapter_reset(%d) FAILED!\n", ndev->name, qdev->index); } printk(KERN_ERR PFX "%s: Releaseing driver lock via chip reset.\n",ndev->name); } else { printk(KERN_ERR PFX "%s: Could not acquire driver lock to do " "reset!\n", ndev->name); retval = -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); } ql_free_mem_resources(qdev); return retval; } static int ql_adapter_up(struct ql3_adapter *qdev) { struct net_device *ndev = qdev->ndev; int err; unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED; unsigned long hw_flags; if (ql_alloc_mem_resources(qdev)) { printk(KERN_ERR PFX "%s Unable to allocate buffers.\n", ndev->name); return -ENOMEM; } if (qdev->msi) { if (pci_enable_msi(qdev->pdev)) { printk(KERN_ERR PFX "%s: User requested MSI, but MSI failed to " "initialize. 
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize.  Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
			       qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			  irq_flags, ndev->name, ndev);
	if (err) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d - already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	err = ql_wait_for_drvr_lock(qdev);
	if (err) {
		err = ql_adapter_initialize(qdev);
		if (err) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}

static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}
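
/*
 * The MAC address is programmed through an indirect register pair:
 * macAddrIndirectPtrReg selects which half of the address is being
 * written (index 0 for the lower 32 bits, index 1 for the upper 16
 * bits) and macAddrDataReg supplies the value.  The write is only
 * permitted while the interface is down, and is serialized against
 * other register access with hw_lock.
 */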
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
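
/*
 * Reset recovery worker.  If either reset flag is set (QL_RESET_START,
 * or QL_RESET_PER_SCSI on behalf of the SCSI function), any skbs still
 * sitting in the transmit buffer control blocks are unmapped and freed,
 * the Network Reset Interrupt is acknowledged, and the worker polls
 * ispControlStatus for up to 10 seconds for the soft reset to finish.
 * On timeout the adapter is cycled with a full chip reset; otherwise it
 * is simply cycled back up.
 */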
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	unsigned long hw_flags;

	/* test_bit() takes a bit number, not a mask, so check each flag. */
	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(&tx_cb->map[0], mapaddr),
					pci_unmap_len(&tx_cb->map[0], maplen),
					PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
					       pci_unmap_addr(&tx_cb->map[j], mapaddr),
					       pci_unmap_len(&tx_cb->map[j], maplen),
					       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the Soft Reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.ispControlStatus,
						    ((ISP_CONTROL_RI << 16) |
						     ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
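
/*
 * PCI probe: enable the device, claim its regions, negotiate a 64-bit
 * DMA mask (falling back to 32-bit), allocate the net_device, map the
 * register BAR (resource 1), and pull the MAC address and MTU from
 * NVRAM before registering with the network stack.  The adapter timer
 * that drives the link state machine is only initialized here; it is
 * armed by ql_adapter_up().
 */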
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int pci_using_dac, err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = ioremap_nocache(pci_resource_start(pdev, 1),
						  pci_resource_len(qdev->pdev, 1));
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->open = ql3xxx_open;
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	/* ndev->set_multicast_list
	 * This device is one side of a two-function adapter
	 * (NIC and iSCSI).  Promiscuous mode setting/clearing is
	 * not allowed from the NIC side.
	 */
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;
	ndev->tx_timeout = ql3xxx_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.  (0x4e is assumed here to be the fixed location of
	 * the PCI-X command register in this adapter's config space.)
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}

static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);