author	Ingo Molnar <mingo@elte.hu>	2008-07-26 11:48:49 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-26 11:48:49 -0400
commit	c3cc99ff5d24e2eeaf7ec2032e720681916990e3 (patch)
tree	c3e74171bbbd2adde9d60b9db1c440415c8d2831 /drivers/net
parent	38ffbe66d59051fd9cfcfc8545f164700e2fa3bc (diff)
parent	024e8ac04453b3525448c31ef39848cf675ba6db (diff)
Merge branch 'linus' into x86/xen
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/e1000e/netdev.c	2
-rw-r--r--	drivers/net/fec.c	54
-rw-r--r--	drivers/net/ibmveth.c	189
-rw-r--r--	drivers/net/ibmveth.h	5
-rw-r--r--	drivers/net/mlx4/cmd.c	3
-rw-r--r--	drivers/net/mlx4/eq.c	1
-rw-r--r--	drivers/net/mlx4/fw.c	18
-rw-r--r--	drivers/net/mlx4/fw.h	2
-rw-r--r--	drivers/net/mlx4/main.c	2
-rw-r--r--	drivers/net/mlx4/mlx4.h	1
-rw-r--r--	drivers/net/mlx4/mr.c	49
-rw-r--r--	drivers/net/mlx4/pd.c	7
-rw-r--r--	drivers/net/sky2.c	5
-rw-r--r--	drivers/net/virtio_net.c	114
14 files changed, 316 insertions, 136 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 869544b8c05c..9c0f56b3c518 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4067,8 +4067,6 @@ static void e1000_netpoll(struct net_device *netdev)
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev);
 
-	e1000_clean_tx_irq(adapter);
-
 	enable_irq(adapter->pdev->irq);
 }
 #endif
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 32a4f17d35fc..ecd5c71a7a8a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -2,12 +2,6 @@
  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
  *
- * This version of the driver is specific to the FADS implementation,
- * since the board contains control registers external to the processor
- * for the control of the LevelOne LXT970 transceiver. The MPC860T manual
- * describes connections using the internal parallel port I/O, which
- * is basically all of Port D.
- *
  * Right now, I am very wasteful with the buffers. I allocate memory
  * pages and then divide them into 2K frame buffers. This way I know I
  * have buffers large enough to hold one frame within one buffer descriptor.
@@ -49,17 +43,9 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
-    defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x)
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include "fec.h"
-#else
-#include <asm/8xx_immap.h>
-#include <asm/mpc8xx.h>
-#include "commproc.h"
-#endif
 
 #if defined(CONFIG_FEC2)
 #define FEC_MAX_PORTS 2
@@ -67,7 +53,7 @@
 #define FEC_MAX_PORTS 1
 #endif
 
-#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
+#if defined(CONFIG_M5272)
 #define HAVE_mii_link_interrupt
 #endif
 
@@ -1235,14 +1221,9 @@ static phy_info_t const * const phy_info[] = {
 
 /* ------------------------------------------------------------------------- */
 #ifdef HAVE_mii_link_interrupt
-#ifdef CONFIG_RPXCLASSIC
-static void
-mii_link_interrupt(void *dev_id);
-#else
 static irqreturn_t
 mii_link_interrupt(int irq, void * dev_id);
 #endif
-#endif
 
 #if defined(CONFIG_M5272)
 /*
@@ -1795,24 +1776,6 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
 
 	if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
 		panic("Could not allocate FEC IRQ!");
-
-#ifdef CONFIG_RPXCLASSIC
-	/* Make Port C, bit 15 an input that causes interrupts.
-	 */
-	immap->im_ioport.iop_pcpar &= ~0x0001;
-	immap->im_ioport.iop_pcdir &= ~0x0001;
-	immap->im_ioport.iop_pcso &= ~0x0001;
-	immap->im_ioport.iop_pcint |= 0x0001;
-	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
-
-	/* Make LEDS reflect Link status.
-	 */
-	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
-#endif
-#ifdef CONFIG_FADS
-	if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
-		panic("Could not allocate MII IRQ!");
-#endif
 }
 
 static void __inline__ fec_get_mac(struct net_device *dev)
@@ -1821,16 +1784,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
 
 	bd = (bd_t *)__res;
 	memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);
-
-#ifdef CONFIG_RPXCLASSIC
-	/* The Embedded Planet boards have only one MAC address in
-	 * the EEPROM, but can have two Ethernet ports. For the
-	 * FEC port, we create another address by setting one of
-	 * the address bits above something that would have (up to
-	 * now) been allocated.
-	 */
-	dev->dev_adrd[3] |= 0x80;
-#endif
 }
 
 static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
@@ -2109,13 +2062,8 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
 /* This interrupt occurs when the PHY detects a link change.
 */
 #ifdef HAVE_mii_link_interrupt
-#ifdef CONFIG_RPXCLASSIC
-static void
-mii_link_interrupt(void *dev_id)
-#else
 static irqreturn_t
 mii_link_interrupt(int irq, void * dev_id)
-#endif
 {
 	struct net_device *dev = dev_id;
 	struct fec_enet_private *fep = netdev_priv(dev);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..e5a6e2e84540 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
 */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -52,7 +53,9 @@
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
+#include <asm/iommu.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
 #include <linux/seq_file.h>
 
 #include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 static struct kobj_type ktype_veth_pool;
 
+
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "ibmveth"
 static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	u32 i;
 	u32 count = pool->size - atomic_read(&pool->available);
 	u32 buffers_added = 0;
+	struct sk_buff *skb;
+	unsigned int free_index, index;
+	u64 correlator;
+	unsigned long lpar_rc;
+	dma_addr_t dma_addr;
 
 	mb();
 
 	for(i = 0; i < count; ++i) {
-		struct sk_buff *skb;
-		unsigned int free_index, index;
-		u64 correlator;
 		union ibmveth_buf_desc desc;
-		unsigned long lpar_rc;
-		dma_addr_t dma_addr;
 
 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
+		if (dma_mapping_error(dma_addr))
+			goto failure;
+
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 		pool->dma_addr[index] = dma_addr;
 		pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-		if(lpar_rc != H_SUCCESS) {
-			pool->free_map[free_index] = index;
-			pool->skbuff[index] = NULL;
-			if (pool->consumer_index == 0)
-				pool->consumer_index = pool->size - 1;
-			else
-				pool->consumer_index--;
-			dma_unmap_single(&adapter->vdev->dev,
-					pool->dma_addr[index], pool->buff_size,
-					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(skb);
-			adapter->replenish_add_buff_failure++;
-			break;
-		} else {
+		if (lpar_rc != H_SUCCESS)
+			goto failure;
+		else {
 			buffers_added++;
 			adapter->replenish_add_buff_success++;
 		}
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 	mb();
 	atomic_add(buffers_added, &(pool->available));
+	return;
+
+failure:
+	pool->free_map[free_index] = index;
+	pool->skbuff[index] = NULL;
+	if (pool->consumer_index == 0)
+		pool->consumer_index = pool->size - 1;
+	else
+		pool->consumer_index--;
+	if (!dma_mapping_error(dma_addr))
+		dma_unmap_single(&adapter->vdev->dev,
+				pool->dma_addr[index], pool->buff_size,
+				DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+	adapter->replenish_add_buff_failure++;
+
+	mb();
+	atomic_add(buffers_added, &(pool->available));
 }
 
 /* replenish routine */
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 
 	adapter->replenish_task_cycles++;
 
-	for(i = 0; i < IbmVethNumBufferPools; i++)
+	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
 		if(adapter->rx_buff_pool[i].active)
 			ibmveth_replenish_buffer_pool(adapter,
 						      &adapter->rx_buff_pool[i]);
@@ -472,6 +487,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
+
+	if (adapter->bounce_buffer != NULL) {
+		if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->bounce_buffer_dma,
+					adapter->netdev->mtu + IBMVETH_BUFF_OH,
+					DMA_BIDIRECTIONAL);
+			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+		}
+		kfree(adapter->bounce_buffer);
+		adapter->bounce_buffer = NULL;
+	}
 }
 
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -607,6 +634,24 @@ static int ibmveth_open(struct net_device *netdev)
 		return rc;
 	}
 
+	adapter->bounce_buffer =
+	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+	if (!adapter->bounce_buffer) {
+		ibmveth_error_printk("unable to allocate bounce buffer\n");
+		ibmveth_cleanup(adapter);
+		napi_disable(&adapter->napi);
+		return -ENOMEM;
+	}
+	adapter->bounce_buffer_dma =
+	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+		ibmveth_error_printk("unable to map bounce buffer\n");
+		ibmveth_cleanup(adapter);
+		napi_disable(&adapter->napi);
+		return -ENOMEM;
+	}
+
 	ibmveth_debug_printk("initial replenish cycle\n");
 	ibmveth_interrupt(netdev->irq, netdev);
 
@@ -853,10 +898,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tx_packets = 0;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
+	int used_bounce = 0;
+	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-					     skb->len, DMA_TO_DEVICE);
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +922,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
-	if (dma_mapping_error(desc.fields.address)) {
-		ibmveth_error_printk("tx: unable to map xmit buffer\n");
+	if (dma_mapping_error(data_dma_addr)) {
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			ibmveth_error_printk("tx: unable to map xmit buffer\n");
+		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+					  skb->len);
+		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
-		tx_dropped++;
-		goto out;
-	}
+		used_bounce = 1;
+	} else
+		desc.fields.address = data_dma_addr;
 
 	/* send the frame. Arbitrarily set retrycount to 1024 */
 	correlator = 0;
@@ -904,8 +955,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netdev->trans_start = jiffies;
 	}
 
-	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
-			 skb->len, DMA_TO_DEVICE);
+	if (!used_bounce)
+		dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+				 skb->len, DMA_TO_DEVICE);
 
 out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	netdev->stats.tx_dropped += tx_dropped;
@@ -1053,9 +1105,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ibmveth_adapter *adapter = dev->priv;
+	struct vio_dev *viodev = adapter->vdev;
 	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-	int reinit = 0;
-	int i, rc;
+	int i;
 
 	if (new_mtu < IBMVETH_MAX_MTU)
 		return -EINVAL;
@@ -1067,23 +1119,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	if (i == IbmVethNumBufferPools)
 		return -EINVAL;
 
+	/* Deactivate all the buffer pools so that the next loop can activate
+	   only the buffer pools necessary to hold the new MTU */
+	for (i = 0; i < IbmVethNumBufferPools; i++)
+		if (adapter->rx_buff_pool[i].active) {
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+			adapter->rx_buff_pool[i].active = 0;
+		}
+
 	/* Look for an active buffer pool that can hold the new MTU */
 	for(i = 0; i<IbmVethNumBufferPools; i++) {
-		if (!adapter->rx_buff_pool[i].active) {
-			adapter->rx_buff_pool[i].active = 1;
-			reinit = 1;
-		}
+		adapter->rx_buff_pool[i].active = 1;
 
 		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-			if (reinit && netif_running(adapter->netdev)) {
+			if (netif_running(adapter->netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(adapter->netdev);
 				adapter->pool_config = 0;
 				dev->mtu = new_mtu;
-				if ((rc = ibmveth_open(adapter->netdev)))
-					return rc;
-			} else
-				dev->mtu = new_mtu;
+				vio_cmo_set_dev_desired(viodev,
+						ibmveth_get_desired_dma
+						(viodev));
+				return ibmveth_open(adapter->netdev);
+			}
+			dev->mtu = new_mtu;
+			vio_cmo_set_dev_desired(viodev,
+						ibmveth_get_desired_dma
+						(viodev));
 			return 0;
 		}
 	}
@@ -1098,6 +1161,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
 }
 #endif
 
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+	struct ibmveth_adapter *adapter;
+	unsigned long ret;
+	int i;
+	int rxqentries = 1;
+
+	/* netdev inits at probe time along with the structures we need below*/
+	if (netdev == NULL)
+		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+	adapter = netdev_priv(netdev);
+
+	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+	for (i = 0; i < IbmVethNumBufferPools; i++) {
+		/* add the size of the active receive buffers */
+		if (adapter->rx_buff_pool[i].active)
+			ret +=
+			    adapter->rx_buff_pool[i].size *
+			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+					     buff_size);
+		rxqentries += adapter->rx_buff_pool[i].size;
+	}
+	/* add the size of the receive queue entries */
+	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+	return ret;
+}
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
@@ -1242,6 +1345,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 	ibmveth_proc_unregister_adapter(adapter);
 
 	free_netdev(netdev);
+	dev_set_drvdata(&dev->dev, NULL);
+
 	return 0;
 }
 
@@ -1402,14 +1507,15 @@ const char * buf, size_t count)
 				return -EPERM;
 			}
 
-			pool->active = 0;
 			if (netif_running(netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(netdev);
+				pool->active = 0;
 				adapter->pool_config = 0;
 				if ((rc = ibmveth_open(netdev)))
 					return rc;
 			}
+			pool->active = 0;
 		}
 	} else if (attr == &veth_num_attr) {
 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 41f61cd18852..d28186948752 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address,
 	plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
 
 #define IbmVethNumBufferPools 5
+#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
 #define IBMVETH_MAX_MTU 68
 #define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_BUFF_LIST_SIZE 4096
+#define IBMVETH_FILT_LIST_SIZE 4096
 #define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
@@ -143,6 +146,8 @@ struct ibmveth_adapter {
     struct ibmveth_rx_q rx_queue;
     int pool_config;
     int rx_csum;
+    void *bounce_buffer;
+    dma_addr_t bounce_buffer_dma;
 
     /* adapter specific stats */
     u64 replenish_task_cycles;
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 70dff94a8bc6..04d5bc69a6f8 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -67,6 +67,8 @@ enum {
 	CMD_STAT_BAD_INDEX = 0x0a,
 	/* FW image corrupted: */
 	CMD_STAT_BAD_NVMEM = 0x0b,
+	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
+	CMD_STAT_ICM_ERROR = 0x0c,
 	/* Attempt to modify a QP/EE which is not in the presumed state: */
 	CMD_STAT_BAD_QP_STATE = 0x10,
 	/* Bad segment parameters (Address/Size): */
@@ -119,6 +121,7 @@ static int mlx4_status_to_errno(u8 status)
 		[CMD_STAT_BAD_RES_STATE] = -EBADF,
 		[CMD_STAT_BAD_INDEX] = -EBADF,
 		[CMD_STAT_BAD_NVMEM] = -EFAULT,
+		[CMD_STAT_ICM_ERROR] = -ENFILE,
 		[CMD_STAT_BAD_QP_STATE] = -EINVAL,
 		[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
124 [CMD_STAT_REG_BOUND] = -EBUSY, 127 [CMD_STAT_REG_BOUND] = -EBUSY,
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index e141a1513f07..ea3a09aaa844 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/mm.h>
36#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
37 38
38#include <linux/mlx4/cmd.h> 39#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 2b5006b9be67..57278224ba1e 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -46,6 +46,10 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
+static int enable_qos;
+module_param(enable_qos, bool, 0444);
+MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+
 #define MLX4_GET(dest, source, offset) \
 	do { \
 		void *__p = (char *) (source) + (offset); \
@@ -198,7 +202,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
-#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
 
@@ -373,12 +377,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		}
 	}
 
-	if (dev_cap->bmme_flags & 1)
-		mlx4_dbg(dev, "Base MM extensions: yes "
-			 "(flags %d, rsvd L_Key %08x)\n",
-			 dev_cap->bmme_flags, dev_cap->reserved_lkey);
-	else
-		mlx4_dbg(dev, "Base MM extensions: no\n");
+	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
 
 	/*
 	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
@@ -737,6 +737,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
 
+	/* Enable QoS support if module parameter set */
+	if (enable_qos)
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index a0e046c149b7..fbf0e22be122 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -98,7 +98,7 @@ struct mlx4_dev_cap {
 	int cmpt_entry_sz;
 	int mtt_entry_sz;
 	int resize_srq;
-	u8 bmme_flags;
+	u32 bmme_flags;
 	u32 reserved_lkey;
 	u64 max_icm_sz;
 	int max_gso_sz;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d3736013fe9b..8e1d24cda1b0 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -158,6 +158,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags = dev_cap->flags;
+	dev->caps.bmme_flags = dev_cap->bmme_flags;
+	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
 	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
 
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index a4023c2dd050..78038499cff5 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -118,6 +118,7 @@ struct mlx4_bitmap {
 
 struct mlx4_buddy {
 	unsigned long **bits;
+	unsigned int *num_free;
 	int max_order;
 	spinlock_t lock;
 };
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 03a9abcce524..a3c04c5f12c2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -47,7 +47,7 @@ struct mlx4_mpt_entry {
 	__be32 flags;
 	__be32 qpn;
 	__be32 key;
-	__be32 pd;
+	__be32 pd_flags;
 	__be64 start;
 	__be64 length;
 	__be32 lkey;
@@ -61,11 +61,15 @@ struct mlx4_mpt_entry {
 } __attribute__((packed));
 
 #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO (1 << 17)
 #define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
 #define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
 #define MLX4_MPT_FLAG_REGION (1 << 8)
 
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 26)
+#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
+
 #define MLX4_MTT_FLAG_PRESENT 1
 
 #define MLX4_MPT_STATUS_SW 0xF0
@@ -79,23 +83,26 @@ static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
 
 	spin_lock(&buddy->lock);
 
-	for (o = order; o <= buddy->max_order; ++o) {
-		m = 1 << (buddy->max_order - o);
-		seg = find_first_bit(buddy->bits[o], m);
-		if (seg < m)
-			goto found;
-	}
+	for (o = order; o <= buddy->max_order; ++o)
+		if (buddy->num_free[o]) {
+			m = 1 << (buddy->max_order - o);
+			seg = find_first_bit(buddy->bits[o], m);
+			if (seg < m)
+				goto found;
+		}
 
 	spin_unlock(&buddy->lock);
 	return -1;
 
 found:
 	clear_bit(seg, buddy->bits[o]);
+	--buddy->num_free[o];
 
 	while (o > order) {
 		--o;
 		seg <<= 1;
 		set_bit(seg ^ 1, buddy->bits[o]);
+		++buddy->num_free[o];
 	}
 
 	spin_unlock(&buddy->lock);
@@ -113,11 +120,13 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
 
 	while (test_bit(seg ^ 1, buddy->bits[order])) {
 		clear_bit(seg ^ 1, buddy->bits[order]);
+		--buddy->num_free[order];
 		seg >>= 1;
 		++order;
 	}
 
 	set_bit(seg, buddy->bits[order]);
+	++buddy->num_free[order];
 
 	spin_unlock(&buddy->lock);
 }
@@ -131,7 +140,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 
 	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
 			      GFP_KERNEL);
-	if (!buddy->bits)
+	buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+				  GFP_KERNEL);
+	if (!buddy->bits || !buddy->num_free)
 		goto err_out;
 
 	for (i = 0; i <= buddy->max_order; ++i) {
@@ -143,6 +154,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 	}
 
 	set_bit(0, buddy->bits[buddy->max_order]);
+	buddy->num_free[buddy->max_order] = 1;
 
 	return 0;
 
@@ -150,9 +162,10 @@ err_out_free:
 	for (i = 0; i <= buddy->max_order; ++i)
 		kfree(buddy->bits[i]);
 
+err_out:
 	kfree(buddy->bits);
+	kfree(buddy->num_free);
 
-err_out:
 	return -ENOMEM;
 }
 
@@ -164,6 +177,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
 		kfree(buddy->bits[i]);
 
 	kfree(buddy->bits);
+	kfree(buddy->num_free);
 }
 
 static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
@@ -314,21 +328,30 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 
 	memset(mpt_entry, 0, sizeof *mpt_entry);
 
-	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS |
-				       MLX4_MPT_FLAG_MIO |
+	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
 				       MLX4_MPT_FLAG_REGION |
 				       mr->access);
 
 	mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
-	mpt_entry->pd = cpu_to_be32(mr->pd);
+	mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
 	mpt_entry->start = cpu_to_be64(mr->iova);
 	mpt_entry->length = cpu_to_be64(mr->size);
 	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_seg = 0;
-	} else
+	} else {
 		mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+	}
+
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
+	} else {
+		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 
 	err = mlx4_SW2HW_MPT(dev, mailbox,
 			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 3a93c5f0f7ab..aa616892d09c 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -91,6 +91,13 @@ EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
 int mlx4_init_uar_table(struct mlx4_dev *dev)
 {
+	if (dev->caps.num_uars <= 128) {
+		mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
+			 dev->caps.num_uars);
+		mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
+		return -ENODEV;
+	}
+
 	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
 				dev->caps.num_uars, dev->caps.num_uars - 1,
 				max(128, dev->caps.reserved_uars));
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 711e4a8948e0..5257cf464f1a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1829,9 +1829,6 @@ static int sky2_down(struct net_device *dev)
 	if (netif_msg_ifdown(sky2))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
-	/* Stop more packets from being queued */
-	netif_stop_queue(dev);
-
 	/* Disable port IRQ */
 	imask = sky2_read32(hw, B0_IMSK);
 	imask &= ~portirq_msk[port];
@@ -1887,8 +1884,6 @@ static int sky2_down(struct net_device *dev)
 
 	sky2_phy_power_down(hw, port);
 
-	netif_carrier_off(dev);
-
 	/* turn off LED's */
 	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c28d7cb2035b..0196a0df9021 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
 //#define DEBUG
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/module.h>
 #include <linux/virtio.h>
 #include <linux/virtio_net.h>
@@ -54,9 +55,15 @@ struct virtnet_info
 	struct tasklet_struct tasklet;
 	bool free_in_tasklet;
 
+	/* I like... big packets and I cannot lie! */
+	bool big_packets;
+
 	/* Receive & send queues. */
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
+
+	/* Chain pages by the private ptr. */
+	struct page *pages;
 };
 
 static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -69,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
 	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
 }
 
+static void give_a_page(struct virtnet_info *vi, struct page *page)
+{
+	page->private = (unsigned long)vi->pages;
+	vi->pages = page;
+}
+
+static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+{
+	struct page *p = vi->pages;
+
+	if (p)
+		vi->pages = (struct page *)p->private;
+	else
+		p = alloc_page(gfp_mask);
+	return p;
+}
+
 static void skb_xmit_done(struct virtqueue *svq)
 {
 	struct virtnet_info *vi = svq->vdev->priv;
@@ -88,6 +112,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	int err;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -95,10 +120,23 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 		goto drop;
 	}
 	len -= sizeof(struct virtio_net_hdr);
-	BUG_ON(len > MAX_PACKET_LEN);
 
-	skb_trim(skb, len);
+	if (len <= MAX_PACKET_LEN) {
+		unsigned int i;
 
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+			give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
+		skb->data_len = 0;
+		skb_shinfo(skb)->nr_frags = 0;
+	}
+
+	err = pskb_trim(skb, len);
+	if (err) {
+		pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
+		dev->stats.rx_dropped++;
+		goto drop;
+	}
+	skb->truesize += skb->data_len;
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
@@ -160,7 +198,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	int num, err;
+	int num, err, i;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
@@ -170,6 +208,24 @@ static void try_fill_recv(struct virtnet_info *vi)
 
 		skb_put(skb, MAX_PACKET_LEN);
 		vnet_hdr_to_sg(sg, skb);
+
+		if (vi->big_packets) {
+			for (i = 0; i < MAX_SKB_FRAGS; i++) {
+				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+				f->page = get_a_page(vi, GFP_ATOMIC);
+				if (!f->page)
+					break;
+
+				f->page_offset = 0;
+				f->size = PAGE_SIZE;
+
+				skb->data_len += PAGE_SIZE;
+				skb->len += PAGE_SIZE;
+
+				skb_shinfo(skb)->nr_frags++;
+			}
+		}
+
 		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
 		skb_queue_head(&vi->recv, skb);
 
@@ -335,16 +391,11 @@ again:
 	free_old_xmit_skbs(vi);
 
 	/* If we has a buffer left over from last time, send it now. */
-	if (unlikely(vi->last_xmit_skb)) {
-		if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
-			/* Drop this skb: we only queue one. */
-			vi->dev->stats.tx_dropped++;
-			kfree_skb(skb);
-			skb = NULL;
-			goto stop_queue;
-		}
-		vi->last_xmit_skb = NULL;
-	}
+	if (unlikely(vi->last_xmit_skb) &&
+	    xmit_skb(vi, vi->last_xmit_skb) != 0)
+		goto stop_queue;
+
+	vi->last_xmit_skb = NULL;
 
 	/* Put new one in send queue and do transmit */
 	if (likely(skb)) {
@@ -370,6 +421,11 @@ stop_queue:
 		netif_start_queue(dev);
 		goto again;
 	}
+	if (skb) {
+		/* Drop this skb: we only queue one. */
+		vi->dev->stats.tx_dropped++;
+		kfree_skb(skb);
+	}
 	goto done;
 }
 
@@ -408,6 +464,22 @@ static int virtnet_close(struct net_device *dev)
 	return 0;
 }
 
+static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtio_device *vdev = vi->vdev;
+
+	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
+		return -ENOSYS;
+
+	return ethtool_op_set_tx_hw_csum(dev, data);
+}
+
+static struct ethtool_ops virtnet_ethtool_ops = {
+	.set_tx_csum = virtnet_set_tx_csum,
+	.set_sg = ethtool_op_set_sg,
+};
+
 static int virtnet_probe(struct virtio_device *vdev)
 {
 	int err;
@@ -427,6 +499,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = virtnet_netpoll;
 #endif
+	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
 	SET_NETDEV_DEV(dev, &vdev->dev);
 
 	/* Do we support "hardware" checksums? */
@@ -462,11 +535,18 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->dev = dev;
 	vi->vdev = vdev;
 	vdev->priv = vi;
+	vi->pages = NULL;
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
 	vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
 
+	/* If we can receive ANY GSO packets, we must allocate large ones. */
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
+	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
+	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+		vi->big_packets = true;
+
 	/* We expect two virtqueues, receive then send. */
 	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
 	if (IS_ERR(vi->rvq)) {
@@ -541,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev)
 	vdev->config->del_vq(vi->svq);
 	vdev->config->del_vq(vi->rvq);
 	unregister_netdev(vi->dev);
+
+	while (vi->pages)
+		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+
 	free_netdev(vi->dev);
 }
 
@@ -553,7 +637,9 @@ static unsigned int features[] = {
 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
-	VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
+	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
+	VIRTIO_F_NOTIFY_ON_EMPTY,
 };
 
 static struct virtio_driver virtio_net = {