author    Linus Torvalds <torvalds@linux-foundation.org>  2008-10-17 11:58:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-17 11:58:52 -0400
commit    b225ee5bed70254a100896c473e6dd8c2be45c18 (patch)
tree      2f044d5898d15edcff858f624618c788d5b58760 /drivers/net
parent    2e532d68a2b3e2aa6b19731501222069735c741c (diff)
parent    95a5afca4a8d2e1cb77e1d4bc6ff9f718dc32f7a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  net: Remove CONFIG_KMOD from net/ (towards removing CONFIG_KMOD entirely)
  ipv4: Add a missing rcu_assign_pointer() in routing cache.
  [netdrvr] ibmtr: PCMCIA IBMTR is ok on 64bit
  xen-netfront: Avoid unaligned accesses to IP header
  lmc: copy_*_user under spinlock
  [netdrvr] myri10ge, ixgbe: remove broken select INTEL_IOATDMA
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig              12
-rw-r--r--  drivers/net/ixgbe/ixgbe.h         4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c   32
-rw-r--r--  drivers/net/myri10ge/myri10ge.c  26
-rw-r--r--  drivers/net/pcmcia/Kconfig        2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c   31
-rw-r--r--  drivers/net/xen-netfront.c        5
7 files changed, 70 insertions, 42 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1d8af3348331..ad301ace6085 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2410,7 +2410,6 @@ config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
 	depends on PCI && INET
 	select INET_LRO
-	select INTEL_IOATDMA
 	---help---
 	  This driver supports Intel(R) 10GbE PCI Express family of
 	  adapters. For more information on how to identify your adapter, go
@@ -2426,6 +2425,11 @@ config IXGBE
 	  To compile this driver as a module, choose M here. The module
 	  will be called ixgbe.
 
+config IXGBE_DCA
+	bool
+	default y
+	depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
@@ -2462,7 +2466,6 @@ config MYRI10GE
 	select FW_LOADER
 	select CRC32
 	select INET_LRO
-	select INTEL_IOATDMA
 	---help---
 	  This driver supports Myricom Myri-10G Dual Protocol interface in
 	  Ethernet mode. If the eeprom on your board is not recent enough,
@@ -2474,6 +2477,11 @@ config MYRI10GE
 	  To compile this driver as a module, choose M here. The module
 	  will be called myri10ge.
 
+config MYRI10GE_DCA
+	bool
+	default y
+	depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
+
 config NETXEN_NIC
 	tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
 	depends on PCI
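
Note: the IXGBE_DCA and MYRI10GE_DCA helpers above are invisible, computed-only symbols. The "!(IXGBE=y && DCA=m)" clause keeps them off when the driver is built in but the DCA core is a module, so a built-in driver can never reference symbols that might live in a module. An illustrative sketch (not part of the patch) of what this buys in the C sources:

/* Illustrative sketch only, not patch code. */

/* before: every DCA-conditional block had to test both spellings */
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
#include <linux/dca.h>
#endif

/* after: one helper symbol, guaranteed off for IXGBE=y with DCA=m */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif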
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 2198b77c53ed..e116d340dcc6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -36,7 +36,7 @@
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
 
@@ -136,7 +136,7 @@ struct ixgbe_ring {
 	 * offset associated with this ring, which is different
 	 * for DCE and RSS modes */
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	/* cpu for tx queue */
 	int cpu;
 #endif
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ca17af4349d0..7548fb7360d9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -80,7 +80,7 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                             void *p);
 static struct notifier_block dca_notifier = {
@@ -296,7 +296,7 @@ done_cleaning:
 	return (total_packets ? true : false);
 }
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                 struct ixgbe_ring *rx_ring)
 {
@@ -383,7 +383,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 	return 0;
 }
 
-#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+#endif /* CONFIG_IXGBE_DCA */
 /**
  * ixgbe_receive_skb - Send a completed packet up the stack
  * @adapter: board private structure
@@ -947,7 +947,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
 		tx_ring = &(adapter->tx_ring[r_idx]);
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 			ixgbe_update_tx_dca(adapter, tx_ring);
 #endif
@@ -1022,7 +1022,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	rx_ring = &(adapter->rx_ring[r_idx]);
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 		ixgbe_update_rx_dca(adapter, rx_ring);
 #endif
@@ -1066,7 +1066,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
 		rx_ring = &(adapter->rx_ring[r_idx]);
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 			ixgbe_update_rx_dca(adapter, rx_ring);
 #endif
@@ -2155,7 +2155,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
 	netif_carrier_off(netdev);
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
 		dca_remove_requester(&adapter->pdev->dev);
@@ -2167,7 +2167,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	ixgbe_clean_all_tx_rings(adapter);
 	ixgbe_clean_all_rx_rings(adapter);
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	/* since we reset the hardware DCA settings were cleared */
 	if (dca_add_requester(&adapter->pdev->dev) == 0) {
 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -2193,7 +2193,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	int tx_cleaned, work_done = 0;
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
 		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
@@ -3922,7 +3922,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_register;
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	if (dca_add_requester(&pdev->dev) == 0) {
 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
 		/* always use CB2 mode, difference is masked
@@ -3972,7 +3972,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
 	flush_scheduled_work();
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
 		dca_remove_requester(&pdev->dev);
@@ -4105,10 +4105,10 @@ static int __init ixgbe_init_module(void)
 
 	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	dca_register_notify(&dca_notifier);
-
 #endif
+
 	ret = pci_register_driver(&ixgbe_driver);
 	return ret;
 }
@@ -4123,13 +4123,13 @@ module_init(ixgbe_init_module);
  **/
 static void __exit ixgbe_exit_module(void)
 {
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 	dca_unregister_notify(&dca_notifier);
 #endif
 	pci_unregister_driver(&ixgbe_driver);
 }
 
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#ifdef CONFIG_IXGBE_DCA
 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                             void *p)
 {
@@ -4140,7 +4140,7 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
-#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+#endif /* CONFIG_IXGBE_DCA */
 
 module_exit(ixgbe_exit_module);
 
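
Note: taken together, the ixgbe hunks above are the usual DCA plumbing, now gated by the single CONFIG_IXGBE_DCA symbol: a notifier registered at module init/exit plus a per-device requester added at probe and removed at teardown. A condensed, illustrative sketch of that lifecycle, with hypothetical example_* names rather than driver code:

#include <linux/dca.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pci.h>

static int example_notify_dca(struct notifier_block *nb,
                              unsigned long event, void *p)
{
	/* a real driver re-checks each of its bound devices here */
	return NOTIFY_DONE;
}

static struct notifier_block example_dca_notifier = {
	.notifier_call = example_notify_dca,
};

/* probe/remove: opt this PCI function in and out of tagged DCA writes */
static bool example_enable_dca(struct pci_dev *pdev)
{
	/* true when the DCA core accepted this device as a requester */
	return dca_add_requester(&pdev->dev) == 0;
}

static void example_disable_dca(struct pci_dev *pdev)
{
	dca_remove_requester(&pdev->dev);
}

/* module init/exit: watch DCA providers come and go */
static int __init example_init(void)
{
	dca_register_notify(&example_dca_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	dca_unregister_notify(&example_dca_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");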
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 6dce901c7f45..a9aebad52652 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -188,7 +188,7 @@ struct myri10ge_slice_state {
 	dma_addr_t fw_stats_bus;
 	int watchdog_tx_done;
 	int watchdog_tx_req;
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	int cached_dca_tag;
 	int cpu;
 	__be32 __iomem *dca_tag;
@@ -220,7 +220,7 @@ struct myri10ge_priv {
 	int msi_enabled;
 	int msix_enabled;
 	struct msix_entry *msix_vectors;
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	int dca_enabled;
 #endif
 	u32 link_state;
@@ -902,7 +902,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	struct myri10ge_slice_state *ss;
 	int i, status;
 	size_t bytes;
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	unsigned long dca_tag_off;
 #endif
 
@@ -1012,7 +1012,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	}
 	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
 	dca_tag_off = cmd.data0;
 	for (i = 0; i < mgp->num_slices; i++) {
@@ -1051,7 +1051,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	return status;
 }
 
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 static void
 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
 {
@@ -1505,7 +1505,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
 	struct net_device *netdev = ss->mgp->dev;
 	int work_done;
 
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	if (ss->mgp->dca_enabled)
 		myri10ge_update_dca(ss);
 #endif
@@ -1736,7 +1736,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
 	"tx_boundary", "WC", "irq", "MSI", "MSIX",
 	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
 	"serial_number", "watchdog_resets",
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	"dca_capable_firmware", "dca_device_present",
 #endif
 	"link_changes", "link_up", "dropped_link_overflow",
@@ -1815,7 +1815,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
 	data[i++] = (unsigned int)mgp->read_write_dma;
 	data[i++] = (unsigned int)mgp->serial_number;
 	data[i++] = (unsigned int)mgp->watchdog_resets;
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
 	data[i++] = (unsigned int)(mgp->dca_enabled);
 #endif
@@ -3844,7 +3844,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_err(&pdev->dev, "failed reset\n");
 		goto abort_with_slices;
 	}
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	myri10ge_setup_dca(mgp);
 #endif
 	pci_set_drvdata(pdev, mgp);
@@ -3948,7 +3948,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
 	netdev = mgp->dev;
 	unregister_netdev(netdev);
 
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	myri10ge_teardown_dca(mgp);
 #endif
 	myri10ge_dummy_rdma(mgp, 0);
@@ -3993,7 +3993,7 @@ static struct pci_driver myri10ge_driver = {
 #endif
 };
 
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 static int
 myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
 {
@@ -4024,7 +4024,7 @@ static __init int myri10ge_init_module(void)
 		       myri10ge_driver.name, myri10ge_rss_hash);
 		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
 	}
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	dca_register_notify(&myri10ge_dca_notifier);
 #endif
 	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
@@ -4037,7 +4037,7 @@ module_init(myri10ge_init_module);
 
 static __exit void myri10ge_cleanup_module(void)
 {
-#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+#ifdef CONFIG_MYRI10GE_DCA
 	dca_unregister_notify(&myri10ge_dca_notifier);
 #endif
 	pci_unregister_driver(&myri10ge_driver);
diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig
index e8f55d8ed7a9..9b8f793b1cc8 100644
--- a/drivers/net/pcmcia/Kconfig
+++ b/drivers/net/pcmcia/Kconfig
@@ -111,7 +111,7 @@ config ARCNET_COM20020_CS
 
 config PCMCIA_IBMTR
 	tristate "IBM PCMCIA tokenring adapter support"
-	depends on IBMTR!=y && TR && !64BIT
+	depends on IBMTR!=y && TR
 	help
 	  Say Y here if you intend to attach this type of Token Ring PCMCIA
 	  card to your computer. You then also need to say Y to "Token Ring
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index f80640f5a744..d7bb63e616b5 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -122,7 +122,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
      * Most functions mess with the structure
      * Disable interrupts while we do the polling
      */
-    spin_lock_irqsave(&sc->lmc_lock, flags);
 
     switch (cmd) {
     /*
@@ -152,6 +151,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            break;
        }
 
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status (sc, &ctl);
 
        if(ctl.crc_length != sc->ictl.crc_length) {
@@ -161,6 +161,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
        }
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
        ret = 0;
        break;
@@ -187,15 +188,18 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            break;  /* no change */
        }
 
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        lmc_proto_close(sc);
 
        sc->if_type = new_type;
        lmc_proto_attach(sc);
        ret = lmc_proto_open(sc);
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        break;
    }
 
    case LMCIOCGETXINFO: /*fold01*/
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
 
        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
@@ -208,6 +212,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 
@@ -220,6 +225,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
        break;
 
    case LMCIOCGETLMCSTATS:
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->extra_stats.framingBitErrorCount +=
@@ -243,6 +249,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            sc->extra_stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;
        }
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
                         sizeof(sc->lmc_device->stats)) ||
            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
@@ -258,12 +265,14 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            break;
        }
 
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
        sc->extra_stats.check = STATCHECK;
        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;
        break;
 
@@ -282,8 +291,10 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            ret = -EFAULT;
            break;
        }
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;
 
        break;
@@ -294,12 +305,14 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            break;
        }
 
+        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
        lmc_running_reset (dev);
        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 
        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
+        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
        ret = 0;
        break;
@@ -338,14 +351,15 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
         */
        netif_stop_queue(dev);
 
        if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
            ret = -EFAULT;
            break;
        }
        switch(xc.command){
        case lmc_xilinx_reset: /*fold02*/
        {
            u16 mii;
+            spin_lock_irqsave(&sc->lmc_lock, flags);
            mii = lmc_mii_readreg (sc, 0, 16);
 
            /*
@@ -404,6 +418,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
                    lmc_led_off(sc, LMC_DS3_LED2);
                }
            }
+            spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
 
 
@@ -416,6 +431,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
        {
            u16 mii;
            int timeout = 500000;
+            spin_lock_irqsave(&sc->lmc_lock, flags);
            mii = lmc_mii_readreg (sc, 0, 16);
 
            /*
@@ -451,13 +467,14 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
             */
            while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                   (timeout-- > 0))
-                ;
+                cpu_relax();
 
 
            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);
+            spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
            ret = 0x0;
 
@@ -493,6 +510,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 
            printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 
+            spin_lock_irqsave(&sc->lmc_lock, flags);
            lmc_gpio_mkinput(sc, 0xff);
 
            /*
@@ -545,7 +563,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
             */
            while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                   (timeout-- > 0))
-                ;
+                cpu_relax();
 
            printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout);
 
@@ -588,6 +606,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 
            sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+            spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
            kfree(data);
 
@@ -611,8 +630,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
        break;
    }
 
-    spin_unlock_irqrestore(&sc->lmc_lock, flags); /*fold01*/
-
    lmc_trace(dev, "lmc_ioctl out");
 
    return ret;
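
Note: the lmc hunks above are the "lmc: copy_*_user under spinlock" fix from the merge summary. copy_from_user()/copy_to_user() may fault and sleep, which is not allowed while holding lmc_lock with interrupts disabled, so the ioctl now takes the lock only around hardware and driver-state accesses and does the user copies outside it. A self-contained sketch of that pattern, using hypothetical example_* names rather than lmc code:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

struct example_ctl {
	int circuit_type;
};

struct example_dev {
	spinlock_t lock;
	struct example_ctl ictl;
};

static long example_set_ctl(struct example_dev *dev, void __user *arg)
{
	struct example_ctl ctl;
	unsigned long flags;

	/* copy_from_user() can sleep: do it before taking the lock */
	if (copy_from_user(&ctl, arg, sizeof(ctl)))
		return -EFAULT;

	spin_lock_irqsave(&dev->lock, flags);
	dev->ictl = ctl;	/* touch driver state only under the lock */
	spin_unlock_irqrestore(&dev->lock, flags);

	/* copy_to_user() likewise only after the lock is dropped */
	if (copy_to_user(arg, &ctl, sizeof(ctl)))
		return -EFAULT;
	return 0;
}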
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5c7a87e38951..c6948d8f53f6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -239,11 +239,14 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 	 */
 	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
 					 GFP_ATOMIC | __GFP_NOWARN);
 		if (unlikely(!skb))
 			goto no_skb;
 
+		/* Align ip header to a 16 bytes boundary */
+		skb_reserve(skb, NET_IP_ALIGN);
+
 		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 		if (!page) {
 			kfree_skb(skb);
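
Note: the xen-netfront hunk is the "Avoid unaligned accesses to IP header" change from the merge summary: the receive buffer is allocated NET_IP_ALIGN bytes larger and skb_reserve() shifts skb->data by that amount, so after the 14-byte Ethernet header the IP header starts on a naturally aligned boundary on strict-alignment CPUs. A generic, illustrative sketch of the same idiom (the example_* name is hypothetical, not netfront code):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_rx_skb(struct net_device *dev,
					    unsigned int len)
{
	/* ask for NET_IP_ALIGN (normally 2) extra bytes of headroom */
	struct sk_buff *skb = __netdev_alloc_skb(dev, len + NET_IP_ALIGN,
						 GFP_ATOMIC | __GFP_NOWARN);

	if (unlikely(!skb))
		return NULL;

	/* shift skb->data so data + ETH_HLEN (14) lands 4-byte aligned */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}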