/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) 
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
	atomic_inc(&alg->cra_refcnt);
	return alg;
}

struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
	struct module *module = alg->cra_module;

	crypto_alg_put(alg);
	module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

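/*
 * Find the best registered algorithm for @name under @type/@mask.  A
 * match on cra_driver_name is exact and wins immediately; a match on
 * cra_name is fuzzy and only displaces the current candidate if it
 * has a higher cra_priority.  Moribund entries and larvae registered
 * with a different mask are skipped.  The caller must hold
 * crypto_alg_sem; a module and algorithm reference is taken on the
 * returned entry via crypto_mod_get().
 */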
struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact, fuzzy;

		if (crypto_is_moribund(q))
			continue;

		if ((q->cra_flags ^ type) & mask)
			continue;

		if (crypto_is_larval(q) &&
		    ((struct crypto_larval *)q)->mask != mask)
			continue;

		exact = !strcmp(q->cra_driver_name, name);
		fuzzy = !strcmp(q->cra_name, name);
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		best = q->cra_priority;
		if (alg)
			crypto_mod_put(alg);
		alg = q;

		if (exact)
			break;
	}

	return alg;
}
EXPORT_SYMBOL_GPL(__crypto_alg_lookup);

static void crypto_larval_destroy(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	BUG_ON(!crypto_is_larval(alg));
	if (larval->adult)
		crypto_mod_put(larval->adult);
	kfree(larval);
}

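/*
 * A larval is a temporary stand-in registered under @name while the
 * real ("adult") algorithm is being loaded or constructed.  Lookups
 * racing with the instantiation find the larval and block on its
 * completion in crypto_larval_wait() until it has matured or been
 * killed.  If someone registered @name while we were allocating, the
 * duplicate larval is freed and the existing entry returned instead.
 */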
static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
					      u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_larval *larval;

	larval = kzalloc(sizeof(*larval), GFP_KERNEL);
	if (!larval)
		return ERR_PTR(-ENOMEM);

	larval->mask = mask;
	larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
	larval->alg.cra_priority = -1;
	larval->alg.cra_destroy = crypto_larval_destroy;

	atomic_set(&larval->alg.cra_refcnt, 2);
	strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
	init_completion(&larval->completion);

	down_write(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	if (!alg) {
		alg = &larval->alg;
		list_add(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	if (alg != &larval->alg)
		kfree(larval);

	return alg;
}

void crypto_larval_kill(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	down_write(&crypto_alg_sem);
	list_del(&alg->cra_list);
	up_write(&crypto_alg_sem);
	complete_all(&larval->completion);
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	wait_for_completion_interruptible_timeout(&larval->completion, 60 * HZ);
	alg = larval->adult;
	if (alg) {
		if (!crypto_mod_get(alg))
			alg = ERR_PTR(-EAGAIN);
	} else
		alg = ERR_PTR(-ENOENT);
	crypto_mod_put(&larval->alg);

	return alg;
}

static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
					    u32 mask)
{
	struct crypto_alg *alg;

	down_read(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	up_read(&crypto_alg_sem);

	return alg;
}

struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	if (!name)
		return ERR_PTR(-ENOENT);

	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
	type &= mask;

	alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
				      name);
	if (alg)
		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;

	return crypto_larval_alloc(name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_larval_lookup);

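/*
 * Full lookup path: if crypto_larval_lookup() could do no better than
 * register a larval, ask any crypto manager on the notifier chain
 * (loading the cryptomgr module on demand) to instantiate the
 * algorithm, then wait for the larval to mature.  The larval is
 * killed before returning either way.
 */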
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

	ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
	if (ok == NOTIFY_DONE) {
		request_module("cryptomgr");
		ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
	}

	if (ok == NOTIFY_STOP)
		alg = crypto_larval_wait(larval);
	else {
		crypto_mod_put(larval);
		alg = ERR_PTR(-ENOENT);
	}
	crypto_larval_kill(larval);
	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);

static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;

	if (type_obj)
		return type_obj->init(tfm, type, mask);

	switch (crypto_tfm_alg_type(tfm)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		return crypto_init_cipher_ops(tfm);
		
	case CRYPTO_ALG_TYPE_DIGEST:
		return crypto_init_digest_ops(tfm);
		
	case CRYPTO_ALG_TYPE_COMPRESS:
		return crypto_init_compress_ops(tfm);
	
	default:
		break;
	}
	
	BUG();
	return -EINVAL;
}

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
	const struct crypto_type *type = tfm->__crt_alg->cra_type;

	if (type) {
		if (type->exit)
			type->exit(tfm);
		return;
	}

	switch (crypto_tfm_alg_type(tfm)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		crypto_exit_cipher_ops(tfm);
		break;
		
	case CRYPTO_ALG_TYPE_DIGEST:
		crypto_exit_digest_ops(tfm);
		break;
		
	case CRYPTO_ALG_TYPE_COMPRESS:
		crypto_exit_compress_ops(tfm);
		break;
	
	default:
		BUG();
		
	}
}

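/*
 * Size of the context area that has to be allocated behind struct
 * crypto_tfm.  Any part of cra_alignmask exceeding the alignment that
 * kmalloc() already guarantees (crypto_tfm_ctx_alignment()) is added
 * as padding so the context can later be aligned by hand.
 */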
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = alg->cra_type;
	unsigned int len;

	len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	if (type_obj)
		return len + type_obj->ctxsize(alg, type, mask);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		BUG();

	case CRYPTO_ALG_TYPE_CIPHER:
		len += crypto_cipher_ctxsize(alg);
		break;
		
	case CRYPTO_ALG_TYPE_DIGEST:
		len += crypto_digest_ctxsize(alg);
		break;
		
	case CRYPTO_ALG_TYPE_COMPRESS:
		len += crypto_compress_ctxsize(alg);
		break;
	}

	return len;
}

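/*
 * Mark @alg as dying so no further transforms are allocated from it;
 * used when cra_init() fails with -EAGAIN.
 */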
void crypto_shoot_alg(struct crypto_alg *alg)
{
	down_write(&crypto_alg_sem);
	alg->cra_flags |= CRYPTO_ALG_DYING;
	up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
				      u32 mask)
{
	struct crypto_tfm *tfm = NULL;
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, GFP_KERNEL);
	if (tfm == NULL)
		goto out_err;

	tfm->__crt_alg = alg;

	err = crypto_init_ops(tfm, type, mask);
	if (err)
		goto out_free_tfm;

	if (alg->cra_init && (err = alg->cra_init(tfm))) {
		if (err == -EAGAIN)
			crypto_shoot_alg(alg);
		goto cra_init_failed;
	}

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	kfree(tfm);
out_err:
	tfm = ERR_PTR(err);
out:
	return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/*
 *	crypto_alloc_base - Locate algorithm and allocate transform
 *	@alg_name: Name of algorithm
 *	@type: Type of algorithm
 *	@mask: Mask for type comparison
 *
 *	crypto_alloc_base() will first attempt to locate an already loaded
 *	algorithm.  If that fails and the kernel supports dynamically loadable
 *	modules, it will then attempt to load a module of the same name or
 *	alias.  If that fails it will send a query to any loaded crypto manager
 *	to construct an algorithm on the fly.  A refcount is grabbed on the
 *	algorithm which is then associated with the new transform.
 *
 *	The returned transform is of an indeterminate type.  Most people
 *	should use one of the more specific allocation functions such as
 *	crypto_alloc_blkcipher.
 *
 *	In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_alg_mod_lookup(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
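
/*
 * Example (a sketch, not part of this file): a typical caller requests
 * an algorithm by name, checks for an error pointer and releases the
 * transform with crypto_free_tfm() when done.
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("sha1", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */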
 
/*
 *	crypto_free_tfm - Free crypto transform
 *	@tfm: Transform to free
 *
 *	crypto_free_tfm() frees up the transform and any associated resources,
 *	then drops the refcount on the associated algorithm.
 */
void crypto_free_tfm(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;
	int size;

	if (unlikely(!tfm))
		return;

	alg = tfm->__crt_alg;
	size = sizeof(*tfm) + alg->cra_ctxsize;

	if (alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
	memset(tfm, 0, size);
	kfree(tfm);
}
EXPORT_SYMBOL_GPL(crypto_free_tfm);

int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
	
	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}
	
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
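
/*
 * Example (a sketch, not part of this file): probing for an algorithm
 * before depending on it.
 *
 *	if (!crypto_has_alg("aes", CRYPTO_ALG_TYPE_CIPHER,
 *			    CRYPTO_ALG_TYPE_MASK))
 *		return -ENOENT;
 */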

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");