author:    Robert Jennings <rcj@linux.vnet.ibm.com>  2008-07-23 14:34:52 -0400
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-25 01:44:44 -0400
commit:    1096d63d8e7d226630706e15648705d0187787e4
tree:      0233951b529ee7b537d527e6d57c8e5d80d48ae7
parent:    ea866e6526b8a2ead92875732d41b26fdb470312
ibmveth: enable driver for CMO
Enable ibmveth for Cooperative Memory Overcommitment (CMO). For this driver
that means calculating a desired amount of IO memory based on the current MTU
and updating this value with the bus when the MTU changes. Because DMA
mappings can fail under CMO, a bounce buffer is added for the temporary cases
where the driver cannot map IO memory for the buffer pool.

The following changes enable the driver for CMO:

 * DMA mapping errors do not produce error messages if entitlement has been
   exceeded and resources were unavailable.
 * DMA mapping errors are handled gracefully: ibmveth_replenish_buffer_pool()
   now checks the return value of dma_map_single() and fails gracefully.
 * The driver defines a get_desired_dma function so it can operate in a CMO
   environment.
 * When the MTU is changed, the driver updates the device IO entitlement.

Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Santiago Leon <santil@us.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
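At the core of the transmit-side change is a simple pattern: attempt the DMA
mapping, and if it fails (a legitimate outcome once entitlement is exhausted
under CMO) copy the frame into a bounce buffer that was mapped once at open
time, instead of dropping it. A minimal sketch of that pattern, where dev,
bounce_buf and bounce_dma are illustrative stand-ins for the adapter fields
the patch introduces (note dma_mapping_error() took a single argument in this
kernel era):

	dma_addr_t addr;
	int used_bounce = 0;

	addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(addr)) {
		/* Expected under CMO, not a bug: fall back to the
		 * long-lived, pre-mapped bounce buffer. */
		skb_copy_from_linear_data(skb, bounce_buf, skb->len);
		addr = bounce_dma;
		used_bounce = 1;
	}
	/* ... pass addr to the hypervisor send call ... */
	if (!used_bounce)
		dma_unmap_single(dev, addr, skb->len, DMA_TO_DEVICE);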
Diffstat (limited to 'drivers/net/ibmveth.c')
 drivers/net/ibmveth.c | 169 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 135 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 007ca8735a94..e5a6e2e84540 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -52,7 +53,9 @@
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
+#include <asm/iommu.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
 #include <linux/seq_file.h>
 
 #include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 static struct kobj_type ktype_veth_pool;
 
+
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "ibmveth"
 static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	u32 i;
 	u32 count = pool->size - atomic_read(&pool->available);
 	u32 buffers_added = 0;
+	struct sk_buff *skb;
+	unsigned int free_index, index;
+	u64 correlator;
+	unsigned long lpar_rc;
+	dma_addr_t dma_addr;
 
 	mb();
 
 	for(i = 0; i < count; ++i) {
-		struct sk_buff *skb;
-		unsigned int free_index, index;
-		u64 correlator;
 		union ibmveth_buf_desc desc;
-		unsigned long lpar_rc;
-		dma_addr_t dma_addr;
 
 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
+		if (dma_mapping_error(dma_addr))
+			goto failure;
+
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 		pool->dma_addr[index] = dma_addr;
 		pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-		if(lpar_rc != H_SUCCESS) {
-			pool->free_map[free_index] = index;
-			pool->skbuff[index] = NULL;
-			if (pool->consumer_index == 0)
-				pool->consumer_index = pool->size - 1;
-			else
-				pool->consumer_index--;
-			dma_unmap_single(&adapter->vdev->dev,
-					pool->dma_addr[index], pool->buff_size,
-					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(skb);
-			adapter->replenish_add_buff_failure++;
-			break;
-		} else {
+		if (lpar_rc != H_SUCCESS)
+			goto failure;
+		else {
 			buffers_added++;
 			adapter->replenish_add_buff_success++;
 		}
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 	mb();
 	atomic_add(buffers_added, &(pool->available));
+	return;
+
+failure:
+	pool->free_map[free_index] = index;
+	pool->skbuff[index] = NULL;
+	if (pool->consumer_index == 0)
+		pool->consumer_index = pool->size - 1;
+	else
+		pool->consumer_index--;
+	if (!dma_mapping_error(dma_addr))
+		dma_unmap_single(&adapter->vdev->dev,
+				 pool->dma_addr[index], pool->buff_size,
+				 DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+	adapter->replenish_add_buff_failure++;
+
+	mb();
+	atomic_add(buffers_added, &(pool->available));
 }
 
 /* replenish routine */
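The two hunks above fold the old inline error handling into a single failure:
label shared by the new dma_mapping_error() check and the existing H_SUCCESS
check. Because the label is reachable both before and after a successful
mapping, the unwind must test the mapping again before releasing it. A
condensed sketch of that unwind, using the same variables as the function
above (illustrative only, not a further change to the driver):

	/* Return the slot to the free map, roll the consumer index back
	 * one slot (wrapping at zero), and release the DMA mapping only
	 * if it actually succeeded. */
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	pool->consumer_index = pool->consumer_index ?
		pool->consumer_index - 1 : pool->size - 1;
	if (!dma_mapping_error(dma_addr))
		dma_unmap_single(&adapter->vdev->dev, pool->dma_addr[index],
				 pool->buff_size, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);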
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 
 	adapter->replenish_task_cycles++;
 
-	for(i = 0; i < IbmVethNumBufferPools; i++)
+	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
 		if(adapter->rx_buff_pool[i].active)
 			ibmveth_replenish_buffer_pool(adapter,
 						      &adapter->rx_buff_pool[i]);
@@ -472,6 +487,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
+
+	if (adapter->bounce_buffer != NULL) {
+		if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->bounce_buffer_dma,
+					adapter->netdev->mtu + IBMVETH_BUFF_OH,
+					DMA_BIDIRECTIONAL);
+			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+		}
+		kfree(adapter->bounce_buffer);
+		adapter->bounce_buffer = NULL;
+	}
 }
 
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -607,6 +634,24 @@ static int ibmveth_open(struct net_device *netdev)
 		return rc;
 	}
 
+	adapter->bounce_buffer =
+	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+	if (!adapter->bounce_buffer) {
+		ibmveth_error_printk("unable to allocate bounce buffer\n");
+		ibmveth_cleanup(adapter);
+		napi_disable(&adapter->napi);
+		return -ENOMEM;
+	}
+	adapter->bounce_buffer_dma =
+	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+		ibmveth_error_printk("unable to map bounce buffer\n");
+		ibmveth_cleanup(adapter);
+		napi_disable(&adapter->napi);
+		return -ENOMEM;
+	}
+
 	ibmveth_debug_printk("initial replenish cycle\n");
 	ibmveth_interrupt(netdev->irq, netdev);
 
@@ -853,10 +898,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tx_packets = 0;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
+	int used_bounce = 0;
+	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-					     skb->len, DMA_TO_DEVICE);
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +922,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
-	if (dma_mapping_error(desc.fields.address)) {
-		ibmveth_error_printk("tx: unable to map xmit buffer\n");
+	if (dma_mapping_error(data_dma_addr)) {
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			ibmveth_error_printk("tx: unable to map xmit buffer\n");
+		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+					  skb->len);
+		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
-		tx_dropped++;
-		goto out;
-	}
+		used_bounce = 1;
+	} else
+		desc.fields.address = data_dma_addr;
 
 	/* send the frame. Arbitrarily set retrycount to 1024 */
 	correlator = 0;
@@ -904,8 +955,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netdev->trans_start = jiffies;
 	}
 
-	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
-			 skb->len, DMA_TO_DEVICE);
+	if (!used_bounce)
+		dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+				 skb->len, DMA_TO_DEVICE);
 
 out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	netdev->stats.tx_dropped += tx_dropped;
@@ -1053,8 +1105,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ibmveth_adapter *adapter = dev->priv;
+	struct vio_dev *viodev = adapter->vdev;
 	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-	int i, rc;
+	int i;
 
 	if (new_mtu < IBMVETH_MAX_MTU)
 		return -EINVAL;
@@ -1085,10 +1138,15 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 			ibmveth_close(adapter->netdev);
 			adapter->pool_config = 0;
 			dev->mtu = new_mtu;
-			if ((rc = ibmveth_open(adapter->netdev)))
-				return rc;
-		} else
-			dev->mtu = new_mtu;
+			vio_cmo_set_dev_desired(viodev,
+						ibmveth_get_desired_dma
+						(viodev));
+			return ibmveth_open(adapter->netdev);
+		}
+		dev->mtu = new_mtu;
+		vio_cmo_set_dev_desired(viodev,
+					ibmveth_get_desired_dma
+					(viodev));
 		return 0;
 	}
 }
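Both branches of the rewritten MTU path above do the same two things: set the
new MTU, then report the recalculated desired entitlement to the bus via
vio_cmo_set_dev_desired() before any reopen, so the larger buffers are mapped
under the new budget. Condensed, with the pool bookkeeping elided and
need_restart a hypothetical stand-in for the driver's "device was running"
check:

	dev->mtu = new_mtu;
	vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma(viodev));
	if (need_restart)
		return ibmveth_open(adapter->netdev);
	return 0;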
@@ -1103,6 +1161,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
 }
 #endif
 
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *	Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+	struct ibmveth_adapter *adapter;
+	unsigned long ret;
+	int i;
+	int rxqentries = 1;
+
+	/* netdev inits at probe time along with the structures we need below */
+	if (netdev == NULL)
+		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+	adapter = netdev_priv(netdev);
+
+	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+	for (i = 0; i < IbmVethNumBufferPools; i++) {
+		/* add the size of the active receive buffers */
+		if (adapter->rx_buff_pool[i].active)
+			ret +=
+			    adapter->rx_buff_pool[i].size *
+			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size);
+		rxqentries += adapter->rx_buff_pool[i].size;
+	}
+	/* add the size of the receive queue entries */
+	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+	return ret;
+}
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
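To make the entitlement arithmetic concrete, here is a worked example. All
values are assumptions for illustration (the real constants live in
ibmveth.h and may differ): 4 KB IOMMU pages, 4 KB buffer and filter lists,
MTU 1500, one active pool of 512 buffers of 2 KB each, and 8-byte receive
queue entries:

	#define ALIGN_4K(x) (((x) + 4095UL) & ~4095UL)

	static unsigned long example_desired_dma(void)
	{
		unsigned long ret = 4096 + 4096;  /* buffer + filter lists */
		ret += ALIGN_4K(1500);            /* one MTU-sized mapping */
		ret += 512 * ALIGN_4K(2048);      /* active pool buffers   */
		ret += ALIGN_4K((1 + 512) * 8);   /* rx queue entries      */
		return ret;                       /* 2117632 bytes, ~2 MB  */
	}

Note that each 2 KB buffer costs a full aligned IOMMU page, which is why the
pool term dominates the total.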
@@ -1247,6 +1345,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 	ibmveth_proc_unregister_adapter(adapter);
 
 	free_netdev(netdev);
+	dev_set_drvdata(&dev->dev, NULL);
+
 	return 0;
 }
 
@@ -1491,6 +1591,7 @@ static struct vio_driver ibmveth_driver = {
 	.id_table	= ibmveth_device_table,
 	.probe		= ibmveth_probe,
 	.remove		= ibmveth_remove,
+	.get_desired_dma = ibmveth_get_desired_dma,
 	.driver	= {
 		.name	= ibmveth_driver_name,
 		.owner	= THIS_MODULE,
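With .get_desired_dma in the ops table, the VIO bus can query the driver's IO
memory needs instead of guessing. A hypothetical sketch of the bus-side call
(the real CMO plumbing lives in arch/powerpc/kernel/vio.c; the fallback name
below is an assumption, not the bus's actual code):

	if (viodrv->get_desired_dma)
		desired = viodrv->get_desired_dma(viodev);
	else
		desired = default_entitlement;	/* assumed fallback */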