author     Santiago Leon <santil@us.ibm.com>    2006-04-25 12:19:59 -0400
committer  Jeff Garzik <jeff@garzik.org>        2006-05-24 01:30:37 -0400
commit     860f242eb5340d0b0cfe243cb86b2a98f92e8b91 (patch)
tree       286d64b4acfc392bcb926a6f5f7bfb311b0d3efc /drivers/net/ibmveth.c
parent     7b32a312895c00ff03178e49db8b651ee1e48178 (diff)
[PATCH] ibmveth change buffer pools dynamically
This patch provides a sysfs interface for changing some properties of the ibmveth buffer pools (the size of the buffers, the number of buffers per pool, and whether a pool is active). Ethernet drivers normally expose this kind of functionality through ethtool, but the buffers in the ibmveth driver can have an arbitrary size (not only the regular, mini, and jumbo sizes, which are the only ones ethtool can change), and ibmveth can also have an arbitrary number of buffer pools.

Under heavy load we have seen dropped packets, which obviously kills TCP performance. We have created several fixes that mitigate this issue, but we definitely need a way of changing the number of buffers for an adapter dynamically. Changing the size of the buffers also allows users to set the MTU to something big (bigger than a jumbo frame), greatly improving performance on partition-to-partition transfers.

The patch creates the directories pool1...pool4 in the device directory in sysfs, each containing the files num, size, and active (which default to the values in the mainline version).

Comments and suggestions are welcome...
--
Santiago A. Leon
Power Linux Development
IBM Linux Technology Center

Signed-off-by: Jeff Garzik <jeff@garzik.org>
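[Editorial note: a rough usage sketch, not part of the patch. The program below reconfigures one buffer pool through the new attribute files; the sysfs device path and the pool values are invented examples, and only the attribute names num, size, and active come from the patch itself.]

#include <stdio.h>
#include <stdlib.h>

/* write a decimal value to one pool attribute file, e.g. .../pool1/num */
static int write_attr(const char *pool_dir, const char *name, long value)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", pool_dir, name);
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%ld\n", value);
        return fclose(f);
}

int main(int argc, char *argv[])
{
        /* e.g. ./poolcfg /sys/devices/vio/30000002/pool1 768 2048 */
        if (argc != 4) {
                fprintf(stderr, "usage: %s <pool-dir> <num> <size>\n", argv[0]);
                return 1;
        }
        if (write_attr(argv[1], "num", strtol(argv[2], NULL, 10)) ||
            write_attr(argv[1], "size", strtol(argv[3], NULL, 10)) ||
            write_attr(argv[1], "active", 1))
                return 1;
        return 0;
}

Writing num or size makes the driver close and reopen the device with the new pool geometry, and the store handler finishes by kicking the interrupt handler so buffers get replenished or released.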
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--  drivers/net/ibmveth.c  211
1 file changed, 168 insertions(+), 43 deletions(-)
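[Editorial note: the rework of ibmveth_change_mtu() in the diff below accepts any MTU for which some active pool's buff_size exceeds new_mtu + IBMVETH_BUFF_OH. A hypothetical sketch of raising the MTU past jumbo size once such a pool has been enabled; the interface name and MTU value are examples only.]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        /* the driver accepts this only if an active buffer pool has
           buff_size > 15000 + IBMVETH_BUFF_OH; otherwise it returns -EINVAL */
        ifr.ifr_mtu = 15000;
        if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
                perror("SIOCSIFMTU");
        close(fd);
        return 0;
}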
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 52d01027d9e7..71c74fb048dd 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 }
 
 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
         pool->size = pool_size;
         pool->index = pool_index;
         pool->buff_size = buff_size;
         pool->threshold = pool_size / 2;
+        pool->active = pool_active;
 }
 
 /* allocate and setup an buffer pool - called during open */
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
         atomic_set(&pool->available, 0);
         pool->producer_index = 0;
         pool->consumer_index = 0;
-        pool->active = 0;
 
         return 0;
 }
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
                 kfree(pool->skbuff);
                 pool->skbuff = NULL;
         }
-        pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
         }
 
         for(i = 0; i<IbmVethNumBufferPools; i++)
-                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+                if (adapter->rx_buff_pool[i].active)
+                        ibmveth_free_buffer_pool(adapter,
+                                                 &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_device *netdev)
         adapter->rx_queue.num_slots = rxq_entries;
         adapter->rx_queue.toggle = 1;
 
-        /* call change_mtu to init the buffer pools based in initial mtu */
-        ibmveth_change_mtu(netdev, netdev->mtu);
-
         memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
         mac_address = mac_address >> 16;
 
@@ -522,6 +521,17 @@ static int ibmveth_open(struct net_device *netdev)
                 return -ENONET;
         }
 
+        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                if(!adapter->rx_buff_pool[i].active)
+                        continue;
+                if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+                        ibmveth_error_printk("unable to alloc pool\n");
+                        adapter->rx_buff_pool[i].active = 0;
+                        ibmveth_cleanup(adapter);
+                        return -ENOMEM ;
+                }
+        }
+
         ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
         if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
                 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
@@ -550,7 +560,8 @@ static int ibmveth_close(struct net_device *netdev)
 
         ibmveth_debug_printk("close starting\n");
 
-        netif_stop_queue(netdev);
+        if (!adapter->pool_config)
+                netif_stop_queue(netdev);
 
         free_irq(netdev->irq, netdev);
 
@@ -876,46 +887,22 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
         struct ibmveth_adapter *adapter = dev->priv;
+        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
         int i;
-        int prev_smaller = 1;
 
-        if ((new_mtu < 68) ||
-            (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
+        if (new_mtu < IBMVETH_MAX_MTU)
                 return -EINVAL;
 
+        /* Look for an active buffer pool that can hold the new MTU */
         for(i = 0; i<IbmVethNumBufferPools; i++) {
-                int activate = 0;
-                if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
-                        activate = 1;
-                        prev_smaller= 1;
-                } else {
-                        if (prev_smaller)
-                                activate = 1;
-                        prev_smaller= 0;
-                }
-
-                if (activate && !adapter->rx_buff_pool[i].active) {
-                        struct ibmveth_buff_pool *pool =
-                                &adapter->rx_buff_pool[i];
-                        if(ibmveth_alloc_buffer_pool(pool)) {
-                                ibmveth_error_printk("unable to alloc pool\n");
-                                return -ENOMEM;
-                        }
-                        adapter->rx_buff_pool[i].active = 1;
-                } else if (!activate && adapter->rx_buff_pool[i].active) {
-                        adapter->rx_buff_pool[i].active = 0;
-                        h_free_logical_lan_buffer(adapter->vdev->unit_address,
-                                                  (u64)pool_size[i]);
-                }
-
+                if (!adapter->rx_buff_pool[i].active)
+                        continue;
+                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+                        dev->mtu = new_mtu;
+                        return 0;
+                }
         }
-
-        /* kick the interrupt handler so that the new buffer pools get
-           replenished or deallocated */
-        ibmveth_interrupt(dev->irq, dev, NULL);
-
-        dev->mtu = new_mtu;
-        return 0;
+        return -EINVAL;
 }
 
@@ -960,6 +947,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
         adapter->vdev = dev;
         adapter->netdev = netdev;
         adapter->mcastFilterSize= *mcastFilterSize_p;
+        adapter->pool_config = 0;
 
         /* Some older boxes running PHYP non-natively have an OF that
            returns a 8-byte local-mac-address field (and the first
@@ -994,9 +982,16 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
         memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-        for(i = 0; i<IbmVethNumBufferPools; i++)
+        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
                 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
-                                pool_count[i], pool_size[i]);
+                                pool_count[i], pool_size[i],
+                                pool_active[i]);
+                kobj->parent = &dev->dev.kobj;
+                sprintf(kobj->name, "pool%d", i);
+                kobj->ktype = &ktype_veth_pool;
+                kobject_register(kobj);
+        }
 
         ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
@@ -1025,6 +1020,10 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 {
         struct net_device *netdev = dev->dev.driver_data;
         struct ibmveth_adapter *adapter = netdev->priv;
+        int i;
+
+        for(i = 0; i<IbmVethNumBufferPools; i++)
+                kobject_unregister(&adapter->rx_buff_pool[i].kobj);
 
         unregister_netdev(netdev);
 
@@ -1169,6 +1168,132 @@ static void ibmveth_proc_unregister_driver(void)
 }
 #endif /* CONFIG_PROC_FS */
 
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject * kobj,
+                              struct attribute * attr, char * buf)
+{
+        struct ibmveth_buff_pool *pool = container_of(kobj,
+                                                      struct ibmveth_buff_pool,
+                                                      kobj);
+
+        if (attr == &veth_active_attr)
+                return sprintf(buf, "%d\n", pool->active);
+        else if (attr == &veth_num_attr)
+                return sprintf(buf, "%d\n", pool->size);
+        else if (attr == &veth_size_attr)
+                return sprintf(buf, "%d\n", pool->buff_size);
+        return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
+const char * buf, size_t count)
+{
+        struct ibmveth_buff_pool *pool = container_of(kobj,
+                                                      struct ibmveth_buff_pool,
+                                                      kobj);
+        struct net_device *netdev =
+            container_of(kobj->parent, struct device, kobj)->driver_data;
+        struct ibmveth_adapter *adapter = netdev->priv;
+        long value = simple_strtol(buf, NULL, 10);
+        long rc;
+
+        if (attr == &veth_active_attr) {
+                if (value && !pool->active) {
+                        if(ibmveth_alloc_buffer_pool(pool)) {
+                                ibmveth_error_printk("unable to alloc pool\n");
+                                return -ENOMEM;
+                        }
+                        pool->active = 1;
+                        adapter->pool_config = 1;
+                        ibmveth_close(netdev);
+                        adapter->pool_config = 0;
+                        if ((rc = ibmveth_open(netdev)))
+                                return rc;
+                } else if (!value && pool->active) {
+                        int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+                        int i;
+                        /* Make sure there is a buffer pool with buffers that
+                           can hold a packet of the size of the MTU */
+                        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                                if (pool == &adapter->rx_buff_pool[i])
+                                        continue;
+                                if (!adapter->rx_buff_pool[i].active)
+                                        continue;
+                                if (mtu < adapter->rx_buff_pool[i].buff_size) {
+                                        pool->active = 0;
+                                        h_free_logical_lan_buffer(adapter->
+                                                                  vdev->
+                                                                  unit_address,
+                                                                  pool->
+                                                                  buff_size);
+                                }
+                        }
+                        if (pool->active) {
+                                ibmveth_error_printk("no active pool >= MTU\n");
+                                return -EPERM;
+                        }
+                }
+        } else if (attr == &veth_num_attr) {
+                if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+                        return -EINVAL;
+                else {
+                        adapter->pool_config = 1;
+                        ibmveth_close(netdev);
+                        adapter->pool_config = 0;
+                        pool->size = value;
+                        if ((rc = ibmveth_open(netdev)))
+                                return rc;
+                }
+        } else if (attr == &veth_size_attr) {
+                if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+                        return -EINVAL;
+                else {
+                        adapter->pool_config = 1;
+                        ibmveth_close(netdev);
+                        adapter->pool_config = 0;
+                        pool->buff_size = value;
+                        if ((rc = ibmveth_open(netdev)))
+                                return rc;
+                }
+        }
+
+        /* kick the interrupt handler to allocate/deallocate pools */
+        ibmveth_interrupt(netdev->irq, netdev, NULL);
+        return count;
+}
+
+
+#define ATTR(_name, _mode)                              \
+        struct attribute veth_##_name##_attr = {        \
+        .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
+        };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute * veth_pool_attrs[] = {
+        &veth_active_attr,
+        &veth_num_attr,
+        &veth_size_attr,
+        NULL,
+};
+
+static struct sysfs_ops veth_pool_ops = {
+        .show = veth_pool_show,
+        .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+        .release = NULL,
+        .sysfs_ops = &veth_pool_ops,
+        .default_attrs = veth_pool_attrs,
+};
+
+
 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
         { "network", "IBM,l-lan"},
         { "", "" }