/*
 * drivers/pci/pci-driver.c
 *
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 *
 * Released under the GPL v2 only.
 *
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include "pci.h"

/*
 * Dynamic device IDs are disabled for !CONFIG_HOTPLUG
 */

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

#ifdef CONFIG_HOTPLUG

/**
 * store_new_id - add a new PCI device ID to this driver and re-probe devices
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Adds a new dynamic pci device ID to this driver,
 * and causes the driver to probe for all devices again.
 */
static ssize_t
store_new_id(struct device_driver *driver, const char *buf, size_t count)
{
	struct pci_dynid *dynid;
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	__u32 vendor, device, subvendor=PCI_ANY_ID,
		subdevice=PCI_ANY_ID, class=0, class_mask=0;
	unsigned long driver_data=0;
	int fields=0;
	int retval=0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&pdrv->dynids.lock);
	list_add_tail(&dynid->node, &pdrv->dynids.list);
	spin_unlock(&pdrv->dynids.lock);

	if (get_driver(&pdrv->driver)) {
		retval = driver_attach(&pdrv->driver);
		put_driver(&pdrv->driver);
	}

	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
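
/*
 * Example usage from user space (illustrative; "foo" is a hypothetical
 * driver name and the IDs are made up):
 *
 *	# echo "8086 10f5" > /sys/bus/pci/drivers/foo/new_id
 *
 * Only the vendor and device fields are required; subvendor and subdevice
 * default to PCI_ANY_ID, and class, class_mask and driver_data default to
 * 0, matching what store_new_id() above parses.
 */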

static void
pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

static int
pci_create_newid_file(struct pci_driver *drv)
{
	int error = 0;
	if (drv->probe != NULL)
		error = driver_create_file(&drv->driver, &driver_attr_new_id);
	return error;
}

static void pci_remove_newid_file(struct pci_driver *drv)
{
	driver_remove_file(&drv->driver, &driver_attr_new_id);
}
#else /* !CONFIG_HOTPLUG */
static inline void pci_free_dynids(struct pci_driver *drv) {}
static inline int pci_create_newid_file(struct pci_driver *drv)
{
	return 0;
}
static inline void pci_remove_newid_file(struct pci_driver *drv) {}
#endif

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
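
/*
 * Illustrative sketch (not part of the PCI core): a driver normally just
 * declares a static ID table and lets the bus matching below use it, but
 * pci_match_id() can also be called directly.  The vendor/device numbers
 * and the "foo" names are placeholders.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ 0, }
 *	};
 *
 *	const struct pci_device_id *ent = pci_match_id(foo_ids, pdev);
 *	if (ent)
 *		dev_info(&pdev->dev, "matched, driver_data %lu\n",
 *			 ent->driver_data);
 */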

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			spin_unlock(&drv->dynids.lock);
			return &dynid->id;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return pci_match_id(drv->id_table, dev);
}

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;

	return ddi->drv->probe(ddi->dev, ddi->id);
}

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/* Execute driver initialization on the node to which the device's
	   bus is attached.  This way the driver likely allocates its
	   local memory on the right node without any need to change it. */
	node = dev_to_node(&dev->dev);
	if (node >= 0) {
		int cpu;
		node_to_cpumask_ptr(nodecpumask, node);

		get_online_cpus();
		cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			error = work_on_cpu(cpu, local_pci_probe, &ddi);
		else
			error = local_pci_probe(&ddi);
		put_online_cpus();
	} else
		error = local_pci_probe(&ddi);
	return error;
}

/**
 * __pci_device_probe()
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 * 
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int
__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
		if (error >= 0) {
			pci_dev->driver = drv;
			error = 0;
		}
	}
	return error;
}

static int pci_device_probe(struct device * dev)
{
	int error = 0;
	struct pci_driver *drv;
	struct pci_dev *pci_dev;

	drv = to_pci_driver(dev->driver);
	pci_dev = to_pci_dev(dev);
	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error)
		pci_dev_put(pci_dev);

	return error;
}

static int pci_device_remove(struct device * dev)
{
	struct pci_dev * pci_dev = to_pci_dev(dev);
	struct pci_driver * drv = pci_dev->driver;

	if (drv) {
		if (drv->remove)
			drv->remove(pci_dev);
		pci_dev->driver = NULL;
	}

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.  
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */

	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);
	pci_msi_shutdown(pci_dev);
	pci_msix_shutdown(pci_dev);
}

#ifdef CONFIG_PM_SLEEP

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev * pci_dev = to_pci_dev(dev);
	struct pci_driver * drv = pci_dev->driver;
	int i = 0;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;

		pci_dev->state_saved = false;

		i = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, i);
		if (i)
			return i;

		if (pci_dev->state_saved)
			goto Fixup;

		if (pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend);
			goto Fixup;
		}
	}

	pci_save_state(pci_dev);
	/*
	 * This is for compatibility with existing code with legacy PM support.
	 */
	pci_pm_set_unknown_state(pci_dev);

 Fixup:
	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return i;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev * pci_dev = to_pci_dev(dev);
	struct pci_driver * drv = pci_dev->driver;
	int i = 0;

	if (drv && drv->suspend_late) {
		i = drv->suspend_late(pci_dev, state);
		suspend_report_result(drv->suspend_late, i);
	}
	return i;
}

static int pci_legacy_resume_early(struct device *dev)
{
	struct pci_dev * pci_dev = to_pci_dev(dev);
	struct pci_driver * drv = pci_dev->driver;

	return drv && drv->resume_early ?
			drv->resume_early(pci_dev) : 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev * pci_dev = to_pci_dev(dev);
	struct pci_driver * drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
{
	pci_restore_standard_config(pci_dev);
	pci_dev->state_saved = false;
	pci_fixup_device(pci_fixup_resume_early, pci_dev);
}

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);

	if (!pci_is_bridge(pci_dev))
		pci_enable_wake(pci_dev, PCI_D0, false);
}

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_is_bridge(pci_dev))
		pci_disable_enabled_device(pci_dev);
	pci_save_state(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
		|| drv->resume_early);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	WARN_ON(ret && drv->driver.pm);

	return ret;
}
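
/*
 * Illustrative sketch (not part of this file): a driver that opts into the
 * new framework fills in a struct dev_pm_ops and points its pci_driver's
 * driver.pm field at it instead of setting the legacy suspend/resume hooks
 * checked above.  All "foo" names are placeholders.
 *
 *	static struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *	};
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.driver.pm	= &foo_pm_ops,
 *	};
 */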

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (drv && drv->pm && drv->pm->prepare)
		error = drv->pm->prepare(dev);

	return error;
}

static void pci_pm_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);
}

#ifdef CONFIG_SUSPEND

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		goto Fixup;
	}

	pci_dev->state_saved = false;

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (pci_dev->state_saved)
			goto Fixup;

		if (pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (!pci_is_bridge(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

 Fixup:
	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (drv && drv->pm && drv->pm->suspend_noirq) {
		error = drv->pm->suspend_noirq(dev);
		suspend_report_result(drv->pm->suspend_noirq, error);
	}

	if (!error)
		pci_pm_set_unknown_state(pci_dev);

	return error;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	pci_pm_default_resume_noirq(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->resume_noirq)
		error = drv->pm->resume_noirq(dev);

	return error;
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			error = pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATION

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	pci_dev->state_saved = false;

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	return 0;
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (drv && drv->pm && drv->pm->freeze_noirq) {
		error = drv->pm->freeze_noirq(dev);
		suspend_report_result(drv->pm->freeze_noirq, error);
	}

	if (!error)
		pci_pm_set_unknown_state(pci_dev);

	return error;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	pci_update_current_state(pci_dev, PCI_D0);

	if (drv && drv->pm && drv->pm->thaw_noirq)
		error = drv->pm->thaw_noirq(dev);

	return error;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		goto Fixup;
	}

	pci_dev->state_saved = false;

	if (pm->poweroff) {
		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
	}

	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
		pci_prepare_to_sleep(pci_dev);

 Fixup:
	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return error;
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (drv && drv->pm && drv->pm->poweroff_noirq) {
		error = drv->pm->poweroff_noirq(dev);
		suspend_report_result(drv->pm->poweroff_noirq, error);
	}

	return error;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	pci_pm_default_resume_noirq(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->restore_noirq)
		error = drv->pm->restore_noirq(dev);

	return error;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			error = pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_HIBERNATION */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATION */

struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM_SLEEP */

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM_SLEEP */

/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 * 
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0. 
 * If no error occurred, the driver remains registered even if 
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	int error;

	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	error = driver_register(&drv->driver);
	if (error)
		return error;

	error = pci_create_newid_file(drv);
	if (error)
		driver_unregister(&drv->driver);

	return error;
}
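
/*
 * Callers normally use the pci_register_driver() wrapper from <linux/pci.h>,
 * which passes THIS_MODULE and KBUILD_MODNAME here.  A minimal, illustrative
 * module init/exit pair ("foo_driver" is a placeholder):
 *
 *	static int __init foo_init(void)
 *	{
 *		return pci_register_driver(&foo_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		pci_unregister_driver(&foo_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */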

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 * 
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */

void
pci_unregister_driver(struct pci_driver *drv)
{
	pci_remove_newid_file(drv);
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no 
 * registered driver for the device.
 */
struct pci_driver *
pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for(i=0; i<=PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 * 
 * Used by the driver core to check whether the PCI device is in the
 * driver's list of supported devices.  Returns 1 if there is a match
 * and 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv = to_pci_driver(drv);
	const struct pci_device_id *found_id;

	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
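
/*
 * Illustrative sketch: code that stashes a pci_dev pointer beyond the scope
 * in which it was obtained should hold a reference for as long as the
 * pointer is in use ("bar" is a hypothetical private structure):
 *
 *	bar->pdev = pci_dev_get(pdev);
 *	...
 *	pci_dev_put(bar->pdev);
 */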

#ifndef CONFIG_HOTPLUG
int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return -ENODEV;
}
#endif

struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_attrs	= pci_dev_attrs,
	.pm		= PCI_PM_OPS_PTR,
};

static int __init pci_driver_init(void)
{
	return bus_register(&pci_bus_type);
}

postcore_initcall(pci_driver_init);

EXPORT_SYMBOL(pci_match_id);
EXPORT_SYMBOL(__pci_register_driver);
EXPORT_SYMBOL(pci_unregister_driver);
EXPORT_SYMBOL(pci_dev_driver);
EXPORT_SYMBOL(pci_bus_type);
EXPORT_SYMBOL(pci_dev_get);
EXPORT_SYMBOL(pci_dev_put);
->fn, p->tx_chan, lc); return 0; } static u32 get_rx_csum(struct net_device *dev) { struct port_info *p = netdev_priv(dev); return p->rx_offload & RX_CSO; } static int set_rx_csum(struct net_device *dev, u32 data) { struct port_info *p = netdev_priv(dev); if (data) p->rx_offload |= RX_CSO; else p->rx_offload &= ~RX_CSO; return 0; } static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { const struct port_info *pi = netdev_priv(dev); const struct sge *s = &pi->adapter->sge; e->rx_max_pending = MAX_RX_BUFFERS; e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; e->rx_jumbo_max_pending = 0; e->tx_max_pending = MAX_TXQ_ENTRIES; e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; e->rx_jumbo_pending = 0; e->tx_pending = s->ethtxq[pi->first_qset].q.size; } static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { int i; const struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || e->tx_pending > MAX_TXQ_ENTRIES || e->rx_mini_pending > MAX_RSPQ_ENTRIES || e->rx_mini_pending < MIN_RSPQ_ENTRIES || e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) return -EINVAL; if (adapter->flags & FULL_INIT_DONE) return -EBUSY; for (i = 0; i < pi->nqsets; ++i) { s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; } return 0; } static int closest_timer(const struct sge *s, int time) { int i, delta, match = 0, min_delta = INT_MAX; for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { delta = time - s->timer_val[i]; if (delta < 0) delta = -delta; if (delta < min_delta) { min_delta = delta; match = i; } } return match; } static int closest_thres(const struct sge *s, int thres) { int i, delta, match = 0, min_delta = INT_MAX; for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { delta = thres - s->counter_val[i]; if (delta < 0) delta = -delta; if (delta < min_delta) { min_delta = delta; match = i; } } return match; } /* * Return a queue's interrupt hold-off time in us. 0 means no timer. */ static unsigned int qtimer_val(const struct adapter *adap, const struct sge_rspq *q) { unsigned int idx = q->intr_params >> 1; return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; } /** * set_rxq_intr_params - set a queue's interrupt holdoff parameters * @adap: the adapter * @q: the Rx queue * @us: the hold-off time in us, or 0 to disable timer * @cnt: the hold-off packet count, or 0 to disable counter * * Sets an Rx queue's interrupt hold-off time and packet count. At least * one of the two needs to be enabled for the queue to generate interrupts. */ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, unsigned int us, unsigned int cnt) { if ((us | cnt) == 0) cnt = 1; if (cnt) { int err; u32 v, new_idx; new_idx = closest_thres(&adap->sge, cnt); if (q->desc && q->pktcnt_idx != new_idx) { /* the queue has already been created, update it */ v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | FW_PARAMS_PARAM_YZ(q->cntxt_id); err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, &new_idx); if (err) return err; } q->pktcnt_idx = new_idx; } us = us == 0 ? 6 : closest_timer(&adap->sge, us); q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? 
QINTR_CNT_EN : 0); return 0; } static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { const struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, c->rx_coalesce_usecs, c->rx_max_coalesced_frames); } static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { const struct port_info *pi = netdev_priv(dev); const struct adapter *adap = pi->adapter; const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; c->rx_coalesce_usecs = qtimer_val(adap, rq); c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? adap->sge.counter_val[rq->pktcnt_idx] : 0; return 0; } /* * Translate a physical EEPROM address to virtual. The first 1K is accessed * through virtual addresses starting at 31K, the rest is accessed through * virtual addresses starting at 0. This mapping is correct only for PF0. */ static int eeprom_ptov(unsigned int phys_addr) { if (phys_addr < 1024) return phys_addr + (31 << 10); if (phys_addr < EEPROMSIZE) return phys_addr - 1024; return -EINVAL; } /* * The next two routines implement eeprom read/write from physical addresses. * The physical->virtual translation is correct only for PF0. */ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) { int vaddr = eeprom_ptov(phys_addr); if (vaddr >= 0) vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); return vaddr < 0 ? vaddr : 0; } static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) { int vaddr = eeprom_ptov(phys_addr); if (vaddr >= 0) vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); return vaddr < 0 ? vaddr : 0; } #define EEPROM_MAGIC 0x38E2F10C static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, u8 *data) { int i, err = 0; struct adapter *adapter = netdev2adap(dev); u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); if (!buf) return -ENOMEM; e->magic = EEPROM_MAGIC; for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); if (!err) memcpy(data, buf + e->offset, e->len); kfree(buf); return err; } static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { u8 *buf; int err = 0; u32 aligned_offset, aligned_len, *p; struct adapter *adapter = netdev2adap(dev); if (eeprom->magic != EEPROM_MAGIC) return -EINVAL; aligned_offset = eeprom->offset & ~3; aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { /* * RMW possibly needed for first or last words. 
*/ buf = kmalloc(aligned_len, GFP_KERNEL); if (!buf) return -ENOMEM; err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); if (!err && aligned_len > 4) err = eeprom_rd_phys(adapter, aligned_offset + aligned_len - 4, (u32 *)&buf[aligned_len - 4]); if (err) goto out; memcpy(buf + (eeprom->offset & 3), data, eeprom->len); } else buf = data; err = t4_seeprom_wp(adapter, false); if (err) goto out; for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { err = eeprom_wr_phys(adapter, aligned_offset, *p); aligned_offset += 4; } if (!err) err = t4_seeprom_wp(adapter, true); out: if (buf != data) kfree(buf); return err; } static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) { int ret; const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); if (ret < 0) return ret; ret = t4_load_fw(adap, fw->data, fw->size); release_firmware(fw); if (!ret) dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); return ret; } #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) #define BCAST_CRC 0xa0ccc1a6 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { wol->supported = WAKE_BCAST | WAKE_MAGIC; wol->wolopts = netdev2adap(dev)->wol; memset(&wol->sopass, 0, sizeof(wol->sopass)); } static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { int err = 0; struct port_info *pi = netdev_priv(dev); if (wol->wolopts & ~WOL_SUPPORTED) return -EINVAL; t4_wol_magic_enable(pi->adapter, pi->tx_chan, (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL); if (wol->wolopts & WAKE_BCAST) { err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, ~0ULL, 0, false); if (!err) err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, ~6ULL, ~0ULL, BCAST_CRC, true); } else t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); return err; } #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) static int set_tso(struct net_device *dev, u32 value) { if (value) dev->features |= TSO_FLAGS; else dev->features &= ~TSO_FLAGS; return 0; } static int set_flags(struct net_device *dev, u32 flags) { return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH); } static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) { const struct port_info *pi = netdev_priv(dev); unsigned int n = min_t(unsigned int, p->size, pi->rss_size); p->size = pi->rss_size; while (n--) p->ring_index[n] = pi->rss[n]; return 0; } static int set_rss_table(struct net_device *dev, const struct ethtool_rxfh_indir *p) { unsigned int i; struct port_info *pi = netdev_priv(dev); if (p->size != pi->rss_size) return -EINVAL; for (i = 0; i < p->size; i++) if (p->ring_index[i] >= pi->nqsets) return -EINVAL; for (i = 0; i < p->size; i++) pi->rss[i] = p->ring_index[i]; if (pi->adapter->flags & FULL_INIT_DONE) return write_rss(pi, pi->rss); return 0; } static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, void *rules) { const struct port_info *pi = netdev_priv(dev); switch (info->cmd) { case ETHTOOL_GRXFH: { unsigned int v = pi->rss_mode; info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V4_FLOW: if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | 
RXH_L4_B_2_3; else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V6_FLOW: if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) info->data = RXH_IP_SRC | RXH_IP_DST; break; } return 0; } case ETHTOOL_GRXRINGS: info->data = pi->nqsets; return 0; } return -EOPNOTSUPP; } static struct ethtool_ops cxgb_ethtool_ops = { .get_settings = get_settings, .set_settings = set_settings, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, .get_ringparam = get_sge_param, .set_ringparam = set_sge_param, .get_coalesce = get_coalesce, .set_coalesce = set_coalesce, .get_eeprom_len = get_eeprom_len, .get_eeprom = get_eeprom, .set_eeprom = set_eeprom, .get_pauseparam = get_pauseparam, .set_pauseparam = set_pauseparam, .get_rx_csum = get_rx_csum, .set_rx_csum = set_rx_csum, .set_tx_csum = ethtool_op_set_tx_ipv6_csum, .set_sg = ethtool_op_set_sg, .get_link = ethtool_op_get_link, .get_strings = get_strings, .phys_id = identify_port, .nway_reset = restart_autoneg, .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, .get_wol = get_wol, .set_wol = set_wol, .set_tso = set_tso, .set_flags = set_flags, .get_rxnfc = get_rxnfc, .get_rxfh_indir = get_rss_table, .set_rxfh_indir = set_rss_table, .flash_device = set_flash, }; /* * debugfs support */ static int mem_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = *ppos; loff_t avail = file->f_path.dentry->d_inode->i_size; unsigned int mem = (uintptr_t)file->private_data & 3; struct adapter *adap = file->private_data - mem; if (pos < 0) return -EINVAL; if (pos >= avail) return 0; if (count > avail - pos) count = avail - pos; while (count) { size_t len; int ret, ofst; __be32 data[16]; if (mem == MEM_MC) ret = t4_mc_read(adap, pos, data, NULL); else ret = t4_edc_read(adap, mem, pos, data, NULL); if (ret) return ret; ofst = pos % sizeof(data); len = min(count, sizeof(data) - ofst); if (copy_to_user(buf, (u8 *)data + ofst, len)) return -EFAULT; buf += len; pos += len; count -= len; } count = pos - *ppos; *ppos = pos; return count; } static const struct file_operations mem_debugfs_fops = { .owner = THIS_MODULE, .open = mem_open, .read = mem_read, }; static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { struct dentry *de; de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, (void *)adap + idx, &mem_debugfs_fops); if (de && de->d_inode) de->d_inode->i_size = size_mb << 20; } static int __devinit setup_debugfs(struct adapter *adap) { int i; if (IS_ERR_OR_NULL(adap->debugfs_root)) return -1; i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); if (i & EDRAM0_ENABLE) add_debugfs_mem(adap, 
"edc0", MEM_EDC0, 5); if (i & EDRAM1_ENABLE) add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); if (i & EXT_MEM_ENABLE) add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); if (adap->l2t) debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, &t4_l2t_fops); return 0; } /* * upper-layer driver support */ /* * Allocate an active-open TID and set it to the supplied value. */ int cxgb4_alloc_atid(struct tid_info *t, void *data) { int atid = -1; spin_lock_bh(&t->atid_lock); if (t->afree) { union aopen_entry *p = t->afree; atid = p - t->atid_tab; t->afree = p->next; p->data = data; t->atids_in_use++; } spin_unlock_bh(&t->atid_lock); return atid; } EXPORT_SYMBOL(cxgb4_alloc_atid); /* * Release an active-open TID. */ void cxgb4_free_atid(struct tid_info *t, unsigned int atid) { union aopen_entry *p = &t->atid_tab[atid]; spin_lock_bh(&t->atid_lock); p->next = t->afree; t->afree = p; t->atids_in_use--; spin_unlock_bh(&t->atid_lock); } EXPORT_SYMBOL(cxgb4_free_atid); /* * Allocate a server TID and set it to the supplied value. */ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) { int stid; spin_lock_bh(&t->stid_lock); if (family == PF_INET) { stid = find_first_zero_bit(t->stid_bmap, t->nstids); if (stid < t->nstids) __set_bit(stid, t->stid_bmap); else stid = -1; } else { stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); if (stid < 0) stid = -1; } if (stid >= 0) { t->stid_tab[stid].data = data; stid += t->stid_base; t->stids_in_use++; } spin_unlock_bh(&t->stid_lock); return stid; } EXPORT_SYMBOL(cxgb4_alloc_stid); /* * Release a server TID. */ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) { stid -= t->stid_base; spin_lock_bh(&t->stid_lock); if (family == PF_INET) __clear_bit(stid, t->stid_bmap); else bitmap_release_region(t->stid_bmap, stid, 2); t->stid_tab[stid].data = NULL; t->stids_in_use--; spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); /* * Populate a TID_RELEASE WR. Caller must properly size the skb. */ static void mk_tid_release(struct sk_buff *skb, unsigned int chan, unsigned int tid) { struct cpl_tid_release *req; set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); } /* * Queue a TID release request and if necessary schedule a work queue to * process it. */ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, unsigned int tid) { void **p = &t->tid_tab[tid]; struct adapter *adap = container_of(t, struct adapter, tids); spin_lock_bh(&adap->tid_release_lock); *p = adap->tid_release_head; /* Low 2 bits encode the Tx channel number */ adap->tid_release_head = (void **)((uintptr_t)p | chan); if (!adap->tid_release_task_busy) { adap->tid_release_task_busy = true; schedule_work(&adap->tid_release_task); } spin_unlock_bh(&adap->tid_release_lock); } EXPORT_SYMBOL(cxgb4_queue_tid_release); /* * Process the list of pending TID release requests. 
*/ static void process_tid_release_list(struct work_struct *work) { struct sk_buff *skb; struct adapter *adap; adap = container_of(work, struct adapter, tid_release_task); spin_lock_bh(&adap->tid_release_lock); while (adap->tid_release_head) { void **p = adap->tid_release_head; unsigned int chan = (uintptr_t)p & 3; p = (void *)p - chan; adap->tid_release_head = *p; *p = NULL; spin_unlock_bh(&adap->tid_release_lock); while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL))) schedule_timeout_uninterruptible(1); mk_tid_release(skb, chan, p - adap->tids.tid_tab); t4_ofld_send(adap, skb); spin_lock_bh(&adap->tid_release_lock); } adap->tid_release_task_busy = false; spin_unlock_bh(&adap->tid_release_lock); } /* * Release a TID and inform HW. If we are unable to allocate the release * message we defer to a work queue. */ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) { void *old; struct sk_buff *skb; struct adapter *adap = container_of(t, struct adapter, tids); old = t->tid_tab[tid]; skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); if (likely(skb)) { t->tid_tab[tid] = NULL; mk_tid_release(skb, chan, tid); t4_ofld_send(adap, skb); } else cxgb4_queue_tid_release(t, chan, tid); if (old) atomic_dec(&t->tids_in_use); } EXPORT_SYMBOL(cxgb4_remove_tid); /* * Allocate and initialize the TID tables. Returns 0 on success. */ static int tid_init(struct tid_info *t) { size_t size; unsigned int natids = t->natids; size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + t->nstids * sizeof(*t->stid_tab) + BITS_TO_LONGS(t->nstids) * sizeof(long); t->tid_tab = t4_alloc_mem(size); if (!t->tid_tab) return -ENOMEM; t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; spin_lock_init(&t->stid_lock); spin_lock_init(&t->atid_lock); t->stids_in_use = 0; t->afree = NULL; t->atids_in_use = 0; atomic_set(&t->tids_in_use, 0); /* Setup the free list for atid_tab and clear the stid bitmap. */ if (natids) { while (--natids) t->atid_tab[natids - 1].next = &t->atid_tab[natids]; t->afree = t->atid_tab; } bitmap_zero(t->stid_bmap, t->nstids); return 0; } /** * cxgb4_create_server - create an IP server * @dev: the device * @stid: the server TID * @sip: local IP address to bind server to * @sport: the server's TCP port * @queue: queue to direct messages from this server to * * Create an IP server for the given port and address. * Returns <0 on error and one of the %NET_XMIT_* values on success. 
*/ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, __be32 sip, __be16 sport, unsigned int queue) { unsigned int chan; struct sk_buff *skb; struct adapter *adap; struct cpl_pass_open_req *req; skb = alloc_skb(sizeof(*req), GFP_KERNEL); if (!skb) return -ENOMEM; adap = netdev2adap(dev); req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); req->local_port = sport; req->peer_port = htons(0); req->local_ip = sip; req->peer_ip = htonl(0); chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; req->opt0 = cpu_to_be64(TX_CHAN(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); return t4_mgmt_tx(adap, skb); } EXPORT_SYMBOL(cxgb4_create_server); /** * cxgb4_create_server6 - create an IPv6 server * @dev: the device * @stid: the server TID * @sip: local IPv6 address to bind server to * @sport: the server's TCP port * @queue: queue to direct messages from this server to * * Create an IPv6 server for the given port and address. * Returns <0 on error and one of the %NET_XMIT_* values on success. */ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, const struct in6_addr *sip, __be16 sport, unsigned int queue) { unsigned int chan; struct sk_buff *skb; struct adapter *adap; struct cpl_pass_open_req6 *req; skb = alloc_skb(sizeof(*req), GFP_KERNEL); if (!skb) return -ENOMEM; adap = netdev2adap(dev); req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); req->local_port = sport; req->peer_port = htons(0); req->local_ip_hi = *(__be64 *)(sip->s6_addr); req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); req->peer_ip_hi = cpu_to_be64(0); req->peer_ip_lo = cpu_to_be64(0); chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; req->opt0 = cpu_to_be64(TX_CHAN(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); return t4_mgmt_tx(adap, skb); } EXPORT_SYMBOL(cxgb4_create_server6); /** * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU * @mtus: the HW MTU table * @mtu: the target MTU * @idx: index of selected entry in the MTU table * * Returns the index and the value in the HW MTU table that is closest to * but does not exceed @mtu, unless @mtu is smaller than any value in the * table, in which case that smallest available value is selected. */ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, unsigned int *idx) { unsigned int i = 0; while (i < NMTUS - 1 && mtus[i + 1] <= mtu) ++i; if (idx) *idx = i; return mtus[i]; } EXPORT_SYMBOL(cxgb4_best_mtu); /** * cxgb4_port_chan - get the HW channel of a port * @dev: the net device for the port * * Return the HW Tx channel of the given port. */ unsigned int cxgb4_port_chan(const struct net_device *dev) { return netdev2pinfo(dev)->tx_chan; } EXPORT_SYMBOL(cxgb4_port_chan); /** * cxgb4_port_viid - get the VI id of a port * @dev: the net device for the port * * Return the VI id of the given port. */ unsigned int cxgb4_port_viid(const struct net_device *dev) { return netdev2pinfo(dev)->viid; } EXPORT_SYMBOL(cxgb4_port_viid); /** * cxgb4_port_idx - get the index of a port * @dev: the net device for the port * * Return the index of the given port. 
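 *
 * Illustrative use only (hypothetical variables): connection setup code in
 * an upper-layer driver typically combines these per-port accessors with
 * cxgb4_best_mtu(), e.g.
 *
 *	chan = cxgb4_port_chan(netdev);
 *	port = cxgb4_port_idx(netdev);
 *	mtu  = cxgb4_best_mtu(lldi->mtus, dst_mtu(dst), &mtu_idx);
 *
 * cxgb4_best_mtu() walks the NMTUS-entry HW MTU table linearly and returns
 * the largest table value that does not exceed the requested MTU (or the
 * smallest table entry when the request is below all of them), reporting
 * the chosen index through its idx argument.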
*/ unsigned int cxgb4_port_idx(const struct net_device *dev) { return netdev2pinfo(dev)->port_id; } EXPORT_SYMBOL(cxgb4_port_idx); /** * cxgb4_netdev_by_hwid - return the net device of a HW port * @pdev: identifies the adapter * @id: the HW port id * * Return the net device associated with the interface with the given HW * id. */ struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id) { const struct adapter *adap = pci_get_drvdata(pdev); if (!adap || id >= NCHAN) return NULL; id = adap->chan_map[id]; return id < MAX_NPORTS ? adap->port[id] : NULL; } EXPORT_SYMBOL(cxgb4_netdev_by_hwid); void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6) { struct adapter *adap = pci_get_drvdata(pdev); spin_lock(&adap->stats_lock); t4_tp_get_tcp_stats(adap, v4, v6); spin_unlock(&adap->stats_lock); } EXPORT_SYMBOL(cxgb4_get_tcp_stats); void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, const unsigned int *pgsz_order) { struct adapter *adap = netdev2adap(dev); t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | HPZ3(pgsz_order[3])); } EXPORT_SYMBOL(cxgb4_iscsi_init); static struct pci_driver cxgb4_driver; static void check_neigh_update(struct neighbour *neigh) { const struct device *parent; const struct net_device *netdev = neigh->dev; if (netdev->priv_flags & IFF_802_1Q_VLAN) netdev = vlan_dev_real_dev(netdev); parent = netdev->dev.parent; if (parent && parent->driver == &cxgb4_driver.driver) t4_l2t_update(dev_get_drvdata(parent), neigh); } static int netevent_cb(struct notifier_block *nb, unsigned long event, void *data) { switch (event) { case NETEVENT_NEIGH_UPDATE: check_neigh_update(data); break; case NETEVENT_PMTU_UPDATE: case NETEVENT_REDIRECT: default: break; } return 0; } static bool netevent_registered; static struct notifier_block cxgb4_netevent_nb = { .notifier_call = netevent_cb }; static void uld_attach(struct adapter *adap, unsigned int uld) { void *handle; struct cxgb4_lld_info lli; lli.pdev = adap->pdev; lli.l2t = adap->l2t; lli.tids = &adap->tids; lli.ports = adap->port; lli.vr = &adap->vres; lli.mtus = adap->params.mtus; if (uld == CXGB4_ULD_RDMA) { lli.rxq_ids = adap->sge.rdma_rxq; lli.nrxq = adap->sge.rdmaqs; } else if (uld == CXGB4_ULD_ISCSI) { lli.rxq_ids = adap->sge.ofld_rxq; lli.nrxq = adap->sge.ofldqsets; } lli.ntxq = adap->sge.ofldqsets; lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; lli.adapter_type = adap->params.rev; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> (adap->fn * 4)); lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> (adap->fn * 4)); lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); lli.fw_vers = adap->params.fw_vers; handle = ulds[uld].add(&lli); if (IS_ERR(handle)) { dev_warn(adap->pdev_dev, "could not attach to the %s driver, error %ld\n", uld_str[uld], PTR_ERR(handle)); return; } adap->uld_handle[uld] = handle; if (!netevent_registered) { register_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = true; } if (adap->flags & FULL_INIT_DONE) ulds[uld].state_change(handle, CXGB4_STATE_UP); } static void attach_ulds(struct adapter *adap) { unsigned int i; mutex_lock(&uld_mutex); 
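	/*
	 * uld_mutex serializes this against cxgb4_register_uld() and
	 * detach_ulds(): once the adapter is on adapter_list a concurrently
	 * registering ULD will attach to it, and any ULD already registered
	 * is attached by the loop below, so each (adapter, ULD) pair is
	 * attached exactly once.
	 */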
list_add_tail(&adap->list_node, &adapter_list); for (i = 0; i < CXGB4_ULD_MAX; i++) if (ulds[i].add) uld_attach(adap, i); mutex_unlock(&uld_mutex); } static void detach_ulds(struct adapter *adap) { unsigned int i; mutex_lock(&uld_mutex); list_del(&adap->list_node); for (i = 0; i < CXGB4_ULD_MAX; i++) if (adap->uld_handle[i]) { ulds[i].state_change(adap->uld_handle[i], CXGB4_STATE_DETACH); adap->uld_handle[i] = NULL; } if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; } mutex_unlock(&uld_mutex); } static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) { unsigned int i; mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) if (adap->uld_handle[i]) ulds[i].state_change(adap->uld_handle[i], new_state); mutex_unlock(&uld_mutex); } /** * cxgb4_register_uld - register an upper-layer driver * @type: the ULD type * @p: the ULD methods * * Registers an upper-layer driver with this driver and notifies the ULD * about any presently available devices that support its type. Returns * %-EBUSY if a ULD of the same type is already registered. */ int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) { int ret = 0; struct adapter *adap; if (type >= CXGB4_ULD_MAX) return -EINVAL; mutex_lock(&uld_mutex); if (ulds[type].add) { ret = -EBUSY; goto out; } ulds[type] = *p; list_for_each_entry(adap, &adapter_list, list_node) uld_attach(adap, type); out: mutex_unlock(&uld_mutex); return ret; } EXPORT_SYMBOL(cxgb4_register_uld); /** * cxgb4_unregister_uld - unregister an upper-layer driver * @type: the ULD type * * Unregisters an existing upper-layer driver. */ int cxgb4_unregister_uld(enum cxgb4_uld type) { struct adapter *adap; if (type >= CXGB4_ULD_MAX) return -EINVAL; mutex_lock(&uld_mutex); list_for_each_entry(adap, &adapter_list, list_node) adap->uld_handle[type] = NULL; ulds[type].add = NULL; mutex_unlock(&uld_mutex); return 0; } EXPORT_SYMBOL(cxgb4_unregister_uld); /** * cxgb_up - enable the adapter * @adap: adapter being enabled * * Called when the first port is enabled, this function performs the * actions necessary to make an adapter operational, such as completing * the initialization of HW modules, and enabling interrupts. * * Must be called with the rtnl lock held. */ static int cxgb_up(struct adapter *adap) { int err; err = setup_sge_queues(adap); if (err) goto out; err = setup_rss(adap); if (err) goto freeq; if (adap->flags & USING_MSIX) { name_msix_vecs(adap); err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, adap->msix_info[0].desc, adap); if (err) goto irq_err; err = request_msix_queue_irqs(adap); if (err) { free_irq(adap->msix_info[0].vec, adap); goto irq_err; } } else { err = request_irq(adap->pdev->irq, t4_intr_handler(adap), (adap->flags & USING_MSI) ? 
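		/* an MSI vector is exclusive to this adapter, so no
		 * IRQF_SHARED; only legacy INTx may share the line
		 */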
0 : IRQF_SHARED, adap->name, adap); if (err) goto irq_err; } enable_rx(adap); t4_sge_start(adap); t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; notify_ulds(adap, CXGB4_STATE_UP); out: return err; irq_err: dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); freeq: t4_free_sge_resources(adap); goto out; } static void cxgb_down(struct adapter *adapter) { t4_intr_disable(adapter); cancel_work_sync(&adapter->tid_release_task); adapter->tid_release_task_busy = false; adapter->tid_release_head = NULL; if (adapter->flags & USING_MSIX) { free_msix_queue_irqs(adapter); free_irq(adapter->msix_info[0].vec, adapter); } else free_irq(adapter->pdev->irq, adapter); quiesce_rx(adapter); t4_sge_stop(adapter); t4_free_sge_resources(adapter); adapter->flags &= ~FULL_INIT_DONE; } /* * net_device operations */ static int cxgb_open(struct net_device *dev) { int err; struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; if (!(adapter->flags & FULL_INIT_DONE)) { err = cxgb_up(adapter); if (err < 0) return err; } dev->real_num_tx_queues = pi->nqsets; err = link_start(dev); if (!err) netif_tx_start_all_queues(dev); return err; } static int cxgb_close(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; netif_tx_stop_all_queues(dev); netif_carrier_off(dev); return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); } static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, struct rtnl_link_stats64 *ns) { struct port_stats stats; struct port_info *p = netdev_priv(dev); struct adapter *adapter = p->adapter; spin_lock(&adapter->stats_lock); t4_get_port_stats(adapter, p->tx_chan, &stats); spin_unlock(&adapter->stats_lock); ns->tx_bytes = stats.tx_octets; ns->tx_packets = stats.tx_frames; ns->rx_bytes = stats.rx_octets; ns->rx_packets = stats.rx_frames; ns->multicast = stats.rx_mcast_frames; /* detailed rx_errors */ ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + stats.rx_runt; ns->rx_over_errors = 0; ns->rx_crc_errors = stats.rx_fcs_err; ns->rx_frame_errors = stats.rx_symbol_err; ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + stats.rx_ovflow2 + stats.rx_ovflow3 + stats.rx_trunc0 + stats.rx_trunc1 + stats.rx_trunc2 + stats.rx_trunc3; ns->rx_missed_errors = 0; /* detailed tx_errors */ ns->tx_aborted_errors = 0; ns->tx_carrier_errors = 0; ns->tx_fifo_errors = 0; ns->tx_heartbeat_errors = 0; ns->tx_window_errors = 0; ns->tx_errors = stats.tx_error_frames; ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; return ns; } static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { unsigned int mbox; int ret = 0, prtad, devad; struct port_info *pi = netdev_priv(dev); struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; switch (cmd) { case SIOCGMIIPHY: if (pi->mdio_addr < 0) return -EOPNOTSUPP; data->phy_id = pi->mdio_addr; break; case SIOCGMIIREG: case SIOCSMIIREG: if (mdio_phy_id_is_c45(data->phy_id)) { prtad = mdio_phy_id_prtad(data->phy_id); devad = mdio_phy_id_devad(data->phy_id); } else if (data->phy_id < 32) { prtad = data->phy_id; devad = 0; data->reg_num &= 0x1f; } else return -EINVAL; mbox = pi->adapter->fn; if (cmd == SIOCGMIIREG) ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, data->reg_num, &data->val_out); else ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, data->reg_num, data->val_in); break; default: return -EOPNOTSUPP; } return ret; } static void 
cxgb_set_rxmode(struct net_device *dev) { /* unfortunately we can't return errors to the stack */ set_rxmode(dev, -1, false); } static int cxgb_change_mtu(struct net_device *dev, int new_mtu) { int ret; struct port_info *pi = netdev_priv(dev); if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ return -EINVAL; ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!ret) dev->mtu = new_mtu; return ret; } static int cxgb_set_mac_addr(struct net_device *dev, void *p) { int ret; struct sockaddr *addr = p; struct port_info *pi = netdev_priv(dev); if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, pi->xact_addr_filt, addr->sa_data, true, true); if (ret < 0) return ret; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); pi->xact_addr_filt = ret; return 0; } static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) { struct port_info *pi = netdev_priv(dev); pi->vlan_grp = grp; t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1, grp != NULL, true); } #ifdef CONFIG_NET_POLL_CONTROLLER static void cxgb_netpoll(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; if (adap->flags & USING_MSIX) { int i; struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; for (i = pi->nqsets; i; i--, rx++) t4_sge_intr_msix(0, &rx->rspq); } else t4_intr_handler(adap)(0, adap); } #endif static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, .ndo_start_xmit = t4_eth_xmit, .ndo_get_stats64 = cxgb_get_stats, .ndo_set_rx_mode = cxgb_set_rxmode, .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = cxgb_ioctl, .ndo_change_mtu = cxgb_change_mtu, .ndo_vlan_rx_register = vlan_rx_register, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, #endif }; void t4_fatal_err(struct adapter *adap) { t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); t4_intr_disable(adap); dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); } static void setup_memwin(struct adapter *adap) { u32 bar0; bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), (bar0 + MEMWIN0_BASE) | BIR(0) | WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), (bar0 + MEMWIN1_BASE) | BIR(0) | WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), (bar0 + MEMWIN2_BASE) | BIR(0) | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); if (adap->vres.ocq.size) { unsigned int start, sz_kb; start = pci_resource_start(adap->pdev, 2) + OCQ_WIN_OFFSET(adap->pdev, &adap->vres); sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), start | BIR(1) | WINDOW(ilog2(sz_kb))); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), adap->vres.ocq.start); t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); } } static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) { u32 v; int ret; /* get device capabilities */ memset(c, 0, sizeof(*c)); c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_READ); c->retval_len16 = htonl(FW_LEN16(*c)); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); if (ret < 0) return ret; /* select capabilities we'll be 
using */ if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { if (!vf_acls) c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); else c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); } else if (vf_acls) { dev_err(adap->pdev_dev, "virtualization ACLs not supported"); return ret; } c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); if (ret < 0) return ret; ret = t4_config_glbl_rss(adap, adap->fn, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); if (ret < 0) return ret; ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); if (ret < 0) return ret; t4_sge_init(adap); /* tweak some settings */ t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); v = t4_read_reg(adap, TP_PIO_DATA); t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); /* get basic stuff going */ return t4_early_init(adap, adap->fn); } /* * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. */ #define MAX_ATIDS 8192U /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. */ static int adap_init0(struct adapter *adap) { int ret; u32 v, port_vec; enum dev_state state; u32 params[7], val[7]; struct fw_caps_config_cmd c; ret = t4_check_fw_version(adap); if (ret == -EINVAL || ret > 0) { if (upgrade_fw(adap) >= 0) /* recache FW version */ ret = t4_check_fw_version(adap); } if (ret < 0) return ret; /* contact FW, request master */ ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); return ret; } /* reset device */ ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); if (ret < 0) goto bye; for (v = 0; v < SGE_NTIMERS - 1; v++) adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; adap->sge.counter_val[0] = 1; for (v = 1; v < SGE_NCOUNTERS; v++) adap->sge.counter_val[v] = min(intr_cnt[v - 1], THRESHOLD_3_MASK); #define FW_PARAM_DEV(param) \ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) params[0] = FW_PARAM_DEV(CCLK); ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); if (ret < 0) goto bye; adap->params.vpd.cclk = val[0]; ret = adap_init1(adap, &c); if (ret < 0) goto bye; #define FW_PARAM_PFVF(param) \ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ FW_PARAMS_PARAM_Y(adap->fn)) params[0] = FW_PARAM_DEV(PORTVEC); params[1] = FW_PARAM_PFVF(L2T_START); params[2] = FW_PARAM_PFVF(L2T_END); params[3] = FW_PARAM_PFVF(FILTER_START); params[4] = FW_PARAM_PFVF(FILTER_END); ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val); if (ret < 0) goto bye; port_vec = val[0]; adap->tids.ftid_base = val[3]; adap->tids.nftids = val[4] - val[3] + 1; if (c.ofldcaps) { /* query offload-related parameters */ params[0] = FW_PARAM_DEV(NTID); params[1] = FW_PARAM_PFVF(SERVER_START); params[2] = FW_PARAM_PFVF(SERVER_END); params[3] = FW_PARAM_PFVF(TDDP_START); params[4] = FW_PARAM_PFVF(TDDP_END); params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, val); if (ret < 0) goto bye; adap->tids.ntids = val[0]; adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); adap->tids.stid_base = val[1]; 
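		/*
		 * Firmware reports each range as an inclusive [start, end]
		 * pair, hence the "end - start + 1" counts below.
		 */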
adap->tids.nstids = val[2] - val[1] + 1; adap->vres.ddp.start = val[3]; adap->vres.ddp.size = val[4] - val[3] + 1; adap->params.ofldq_wr_cred = val[5]; adap->params.offload = 1; } if (c.rdmacaps) { params[0] = FW_PARAM_PFVF(STAG_START); params[1] = FW_PARAM_PFVF(STAG_END); params[2] = FW_PARAM_PFVF(RQ_START); params[3] = FW_PARAM_PFVF(RQ_END); params[4] = FW_PARAM_PFVF(PBL_START); params[5] = FW_PARAM_PFVF(PBL_END); ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, val); if (ret < 0) goto bye; adap->vres.stag.start = val[0]; adap->vres.stag.size = val[1] - val[0] + 1; adap->vres.rq.start = val[2]; adap->vres.rq.size = val[3] - val[2] + 1; adap->vres.pbl.start = val[4]; adap->vres.pbl.size = val[5] - val[4] + 1; params[0] = FW_PARAM_PFVF(SQRQ_START); params[1] = FW_PARAM_PFVF(SQRQ_END); params[2] = FW_PARAM_PFVF(CQ_START); params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, val); if (ret < 0) goto bye; adap->vres.qp.start = val[0]; adap->vres.qp.size = val[1] - val[0] + 1; adap->vres.cq.start = val[2]; adap->vres.cq.size = val[3] - val[2] + 1; adap->vres.ocq.start = val[4]; adap->vres.ocq.size = val[5] - val[4] + 1; } if (c.iscsicaps) { params[0] = FW_PARAM_PFVF(ISCSI_START); params[1] = FW_PARAM_PFVF(ISCSI_END); ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, val); if (ret < 0) goto bye; adap->vres.iscsi.start = val[0]; adap->vres.iscsi.size = val[1] - val[0] + 1; } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV adap->params.nports = hweight32(port_vec); adap->params.portvec = port_vec; adap->flags |= FW_OK; /* These are finalized by FW initialization, load their values now */ v = t4_read_reg(adap, TP_TIMER_RESOLUTION); adap->params.tp.tre = TIMERRESOLUTION_GET(v); t4_read_mtu_tbl(adap, adap->params.mtus, NULL); t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); #ifdef CONFIG_PCI_IOV /* * Provision resource limits for Virtual Functions. We currently * grant them all the same static resource limits except for the Port * Access Rights Mask which we're assigning based on the PF. All of * the static provisioning stuff for both the PF and VF really needs * to be managed in a persistent manner for each device which the * firmware controls. */ { int pf, vf; for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { if (num_vf[pf] <= 0) continue; /* VF numbering starts at 1! */ for (vf = 1; vf <= num_vf[pf]; vf++) { ret = t4_cfg_pfvf(adap, adap->fn, pf, vf, VFRES_NEQ, VFRES_NETHCTRL, VFRES_NIQFLINT, VFRES_NIQ, VFRES_TC, VFRES_NVI, FW_PFVF_CMD_CMASK_MASK, pfvfres_pmask(adap, pf, vf), VFRES_NEXACTF, VFRES_R_CAPS, VFRES_WX_CAPS); if (ret < 0) dev_warn(adap->pdev_dev, "failed to " "provision pf/vf=%d/%d; " "err=%d\n", pf, vf, ret); } } } #endif setup_memwin(adap); return 0; /* * If a command timed out or failed with EIO FW does not operate within * its spec or something catastrophic happened to HW/FW, stop issuing * commands. 
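 * Any other failure still calls t4_fw_bye() so the firmware can release
 * whatever this PF acquired during the aborted initialization.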
*/ bye: if (ret != -ETIMEDOUT && ret != -EIO) t4_fw_bye(adap, adap->fn); return ret; } /* EEH callbacks */ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { int i; struct adapter *adap = pci_get_drvdata(pdev); if (!adap) goto out; rtnl_lock(); adap->flags &= ~FW_OK; notify_ulds(adap, CXGB4_STATE_START_RECOVERY); for_each_port(adap, i) { struct net_device *dev = adap->port[i]; netif_device_detach(dev); netif_carrier_off(dev); } if (adap->flags & FULL_INIT_DONE) cxgb_down(adap); rtnl_unlock(); pci_disable_device(pdev); out: return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) { int i, ret; struct fw_caps_config_cmd c; struct adapter *adap = pci_get_drvdata(pdev); if (!adap) { pci_restore_state(pdev); pci_save_state(pdev); return PCI_ERS_RESULT_RECOVERED; } if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "cannot reenable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_cleanup_aer_uncorrect_error_status(pdev); if (t4_wait_dev_ready(adap) < 0) return PCI_ERS_RESULT_DISCONNECT; if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL)) return PCI_ERS_RESULT_DISCONNECT; adap->flags |= FW_OK; if (adap_init1(adap, &c)) return PCI_ERS_RESULT_DISCONNECT; for_each_port(adap, i) { struct port_info *p = adap2pinfo(adap, i); ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, NULL, NULL); if (ret < 0) return PCI_ERS_RESULT_DISCONNECT; p->viid = ret; p->xact_addr_filt = -1; } t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); setup_memwin(adap); if (cxgb_up(adap)) return PCI_ERS_RESULT_DISCONNECT; return PCI_ERS_RESULT_RECOVERED; } static void eeh_resume(struct pci_dev *pdev) { int i; struct adapter *adap = pci_get_drvdata(pdev); if (!adap) return; rtnl_lock(); for_each_port(adap, i) { struct net_device *dev = adap->port[i]; if (netif_running(dev)) { link_start(dev); cxgb_set_rxmode(dev); } netif_device_attach(dev); } rtnl_unlock(); } static struct pci_error_handlers cxgb4_eeh = { .error_detected = eeh_err_detected, .slot_reset = eeh_slot_reset, .resume = eeh_resume, }; static inline bool is_10g_port(const struct link_config *lc) { return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; } static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, unsigned int size, unsigned int iqe_size) { q->intr_params = QINTR_TIMER_IDX(timer_idx) | (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; q->iqe_len = iqe_size; q->size = size; } /* * Perform default configuration of DMA queues depending on the number and type * of ports we found and the number of available CPUs. Most settings can be * modified by the admin prior to actual use. */ static void __devinit cfg_queues(struct adapter *adap) { struct sge *s = &adap->sge; int i, q10g = 0, n10g = 0, qidx = 0; for_each_port(adap, i) n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); /* * We default to 1 queue per non-10G port and up to # of cores queues * per 10G port. */ if (n10g) q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; if (q10g > num_online_cpus()) q10g = num_online_cpus(); for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); pi->first_qset = qidx; pi->nqsets = is_10g_port(&pi->link_cfg) ? 
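		/* 10G ports get up to q10g queue sets each, 1G ports a single set */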
q10g : 1; qidx += pi->nqsets; } s->ethqsets = qidx; s->max_ethqsets = qidx; /* MSI-X may lower it later */ if (is_offload(adap)) { /* * For offload we use 1 queue/channel if all ports are up to 1G, * otherwise we divide all available queues amongst the channels * capped by the number of available cores. */ if (n10g) { i = min_t(int, ARRAY_SIZE(s->ofldrxq), num_online_cpus()); s->ofldqsets = roundup(i, adap->params.nports); } else s->ofldqsets = adap->params.nports; /* For RDMA one Rx queue per channel suffices */ s->rdmaqs = adap->params.nports; } for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { struct sge_eth_rxq *r = &s->ethrxq[i]; init_rspq(&r->rspq, 0, 0, 1024, 64); r->fl.size = 72; } for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) s->ethtxq[i].q.size = 1024; for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) s->ctrlq[i].q.size = 512; for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) s->ofldtxq[i].q.size = 1024; for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { struct sge_ofld_rxq *r = &s->ofldrxq[i]; init_rspq(&r->rspq, 0, 0, 1024, 64); r->rspq.uld = CXGB4_ULD_ISCSI; r->fl.size = 72; } for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { struct sge_ofld_rxq *r = &s->rdmarxq[i]; init_rspq(&r->rspq, 0, 0, 511, 64); r->rspq.uld = CXGB4_ULD_RDMA; r->fl.size = 72; } init_rspq(&s->fw_evtq, 6, 0, 512, 64); init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); } /* * Reduce the number of Ethernet queues across all ports to at most n. * n provides at least one queue per port. */ static void __devinit reduce_ethqs(struct adapter *adap, int n) { int i; struct port_info *pi; while (n < adap->sge.ethqsets) for_each_port(adap, i) { pi = adap2pinfo(adap, i); if (pi->nqsets > 1) { pi->nqsets--; adap->sge.ethqsets--; if (adap->sge.ethqsets <= n) break; } } n = 0; for_each_port(adap, i) { pi = adap2pinfo(adap, i); pi->first_qset = n; n += pi->nqsets; } } /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ #define EXTRA_VECS 2 static int __devinit enable_msix(struct adapter *adap) { int ofld_need = 0; int i, err, want, need; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; struct msix_entry entries[MAX_INGQ + 1]; for (i = 0; i < ARRAY_SIZE(entries); ++i) entries[i].entry = i; want = s->max_ethqsets + EXTRA_VECS; if (is_offload(adap)) { want += s->rdmaqs + s->ofldqsets; /* need nchan for each possible ULD */ ofld_need = 2 * nchan; } need = adap->params.nports + EXTRA_VECS + ofld_need; while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) want = err; if (!err) { /* * Distribute available vectors to the various queue groups. * Every group gets its minimum requirement and NIC gets top * priority for leftovers. 
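 *
 * Worked example (illustrative numbers only): with two 10G ports
 * (nchan = 2), max_ethqsets = 16, rdmaqs = 2 and ofldqsets = 16, we ask
 * for want = 16 + 2 + 2 + 16 = 36 vectors, with ofld_need = 4 and
 * need = 2 + 2 + 4 = 8.  If the PCI core can grant only 20 vectors, the
 * retry loop above settles on want = 20; the NIC side then keeps
 * 20 - 2 - 4 = 14 Ethernet queue sets (reduce_ethqs() trims the ports),
 * and the offload side is left with 20 - 2 - 14 - (4 - 2) = 2 vectors,
 * i.e. ofldqsets = 2, with the two RDMA queues covered by the ofld_need
 * reservation.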
*/ i = want - EXTRA_VECS - ofld_need; if (i < s->max_ethqsets) { s->max_ethqsets = i; if (i < s->ethqsets) reduce_ethqs(adap, i); } if (is_offload(adap)) { i = want - EXTRA_VECS - s->max_ethqsets; i -= ofld_need - nchan; s->ofldqsets = (i / nchan) * nchan; /* round down */ } for (i = 0; i < want; ++i) adap->msix_info[i].vec = entries[i].vector; } else if (err > 0) dev_info(adap->pdev_dev, "only %d MSI-X vectors left, not using MSI-X\n", err); return err; } #undef EXTRA_VECS static int __devinit init_rss(struct adapter *adap) { unsigned int i, j; for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); if (!pi->rss) return -ENOMEM; for (j = 0; j < pi->rss_size; j++) pi->rss[j] = j % pi->nqsets; } return 0; } static void __devinit print_port_info(struct adapter *adap) { static const char *base[] = { "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", "KX", "KR", "KR SFP+", "KR FEC" }; int i; char buf[80]; const char *spd = ""; if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) spd = " 2.5 GT/s"; else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) spd = " 5 GT/s"; for_each_port(adap, i) { struct net_device *dev = adap->port[i]; const struct port_info *pi = netdev_priv(dev); char *bufp = buf; if (!test_bit(i, &adap->registered_device_map)) continue; if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) bufp += sprintf(bufp, "100/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) bufp += sprintf(bufp, "1000/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); if (bufp != buf) --bufp; sprintf(bufp, "BASE-%s", base[pi->port_type]); netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", adap->params.vpd.id, adap->params.rev, buf, is_offload(adap) ? "R" : "", adap->params.pci.width, spd, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? " MSI" : ""); if (adap->name == dev->name) netdev_info(dev, "S/N: %s, E/C: %s\n", adap->params.vpd.sn, adap->params.vpd.ec); } } /* * Free the following resources: * - memory used for tables * - MSI/MSI-X * - net devices * - resources FW is holding for us */ static void free_some_resources(struct adapter *adapter) { unsigned int i; t4_free_mem(adapter->l2t); t4_free_mem(adapter->tids.tid_tab); disable_msi(adapter); for_each_port(adapter, i) if (adapter->port[i]) { kfree(adap2pinfo(adapter, i)->rss); free_netdev(adapter->port[i]); } if (adapter->flags & FW_OK) t4_fw_bye(adapter, adapter->fn); } #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) static int __devinit init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int func, i, err; struct port_info *pi; unsigned int highdma = 0; struct adapter *adapter = NULL; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { /* Just info, some other driver may have claimed the device. 
*/ dev_info(&pdev->dev, "cannot obtain PCI resources\n"); return err; } /* We control everything through one PF */ func = PCI_FUNC(pdev->devfn); if (func != ent->driver_data) { pci_save_state(pdev); /* to restore SR-IOV later */ goto sriov; } err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "cannot enable PCI device\n"); goto out_release_regions; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { highdma = NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " "coherent allocations\n"); goto out_disable_device; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_disable_device; } } pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { err = -ENOMEM; goto out_disable_device; } adapter->regs = pci_ioremap_bar(pdev, 0); if (!adapter->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); err = -ENOMEM; goto out_free_adapter; } adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; adapter->fn = func; adapter->name = pci_name(pdev); adapter->msg_enable = dflt_msg_enable; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); err = t4_prep_adapter(adapter); if (err) goto out_unmap_bar; err = adap_init0(adapter); if (err) goto out_unmap_bar; for_each_port(adapter, i) { struct net_device *netdev; netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_ETH_QSETS); if (!netdev) { err = -ENOMEM; goto out_free_dev; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter->port[i] = netdev; pi = netdev_priv(netdev); pi->adapter = adapter; pi->xact_addr_filt = -1; pi->rx_offload = RX_CSO; pi->port_id = i; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); netdev->irq = pdev->irq; netdev->features |= NETIF_F_SG | TSO_FLAGS; netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma; netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; netdev->vlan_features = netdev->features & VLAN_FEAT; netdev->netdev_ops = &cxgb4_netdev_ops; SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); } pci_set_drvdata(pdev, adapter); if (adapter->flags & FW_OK) { err = t4_port_init(adapter, func, func, 0); if (err) goto out_free_dev; } /* * Configure queues and allocate tables now, they can be needed as * soon as the first register_netdev completes. */ cfg_queues(adapter); adapter->l2t = t4_init_l2t(); if (!adapter->l2t) { /* We tolerate a lack of L2T, giving up some functionality */ dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); adapter->params.offload = 0; } if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { dev_warn(&pdev->dev, "could not allocate TID table, " "continuing\n"); adapter->params.offload = 0; } /* See what interrupts we'll be using */ if (msi > 1 && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; else if (msi > 0 && pci_enable_msi(pdev) == 0) adapter->flags |= USING_MSI; err = init_rss(adapter); if (err) goto out_free_dev; /* * The card is now ready to go. If any errors occur during device * registration we do not fail the whole card but rather proceed only * with the ports we manage to register successfully. However we must * register at least one net device. 
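 * Each port that does register sets its bit in registered_device_map and
 * records its Tx channel in chan_map (so cxgb4_netdev_by_hwid() can map a
 * HW port id back to a net device), and the adapter takes its name from
 * the first interface that registers.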
 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

 sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);
		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);
		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);
		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name        = KBUILD_MODNAME,
	.id_table    = cxgb4_pci_tbl,
	.probe       = init_one,
	.remove      = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);
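
/*
 * Illustrative sketch, not part of this driver: a minimal upper-layer
 * driver (ULD) built on the API exported above would look roughly like
 * the following.  All my_uld_* names are hypothetical, only the .add and
 * .state_change hooks used in this file are shown (a real ULD also
 * supplies an Rx handler for offload CPL messages), and the exact
 * callback signatures are recalled from the companion cxgb4_uld.h header,
 * so treat them as an assumption.
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lli)
 *	{
 *		struct my_uld_dev *u = kzalloc(sizeof(*u), GFP_KERNEL);
 *
 *		if (!u)
 *			return ERR_PTR(-ENOMEM);
 *		u->lldi = *lli;	(keep queue ids, TID table, MTU table, ...)
 *		return u;	(handed back as the handle in later callbacks)
 *	}
 *
 *	static int my_uld_state_change(void *handle, enum cxgb4_state state)
 *	{
 *		return 0;	(react to CXGB4_STATE_UP, _DETACH, ...)
 *	}
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add          = my_uld_add,
 *		.state_change = my_uld_state_change,
 *	};
 *
 * and from its module init/exit it would call
 * cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info) and
 * cxgb4_unregister_uld(CXGB4_ULD_ISCSI) respectively.
 */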