author     Santiago Leon <santil@us.ibm.com>    2005-10-26 12:47:01 -0400
committer  Jeff Garzik <jgarzik@pobox.com>      2005-10-28 16:07:30 -0400
commit     b6d35182fe62e57d368062adcc880ca35119d88e
tree       dd7767a40490d2d532cda4d35a18f8b8e614ab19 /drivers/net/ibmveth.c
parent     0abe791e94033b727f2b55670c2966f3d3d3cf70
[PATCH] ibmveth fix buffer pool management
This patch changes the way the ibmveth driver handles its receive
buffers. The old code mallocs and maps all the buffers in the pools
regardless of MTU size, and it also limits the number of buffer pools
to three. This patch makes the driver malloc and map only the buffers
necessary to support the current MTU. It also replaces the hardcoded
buffer pool count, sizes, and element counts with arrays, making them
easier to change (with the hope of making them runtime parameters in
the future).
Signed-off-by: Santiago Leon <santil@us.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--  drivers/net/ibmveth.c | 102
1 file changed, 74 insertions(+), 28 deletions(-)
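
Before reading the diff: it refers to pool_size[], pool_count[], IbmVethNumBufferPools, and IBMVETH_BUFF_OH, which this patch defines in ibmveth.h rather than in this file. As a rough, minimal sketch of what such a configuration looks like (the pool count, buffer sizes, element counts, and overhead value below are assumptions for illustration, not the committed values):

    /* Sketch of the pool configuration arrays this patch introduces.
     * The real definitions live in ibmveth.h and are not part of this
     * diff; all values here are assumed for illustration only. */
    #define IbmVethNumBufferPools 5
    #define IBMVETH_BUFF_OH       22  /* assumed per-buffer overhead
                                         (ethernet header + handle) */

    static const unsigned int pool_size[IbmVethNumBufferPools] =
            { 512, 2048, 16384, 32768, 65536 };
    static const unsigned int pool_count[IbmVethNumBufferPools] =
            { 256,  768,   256,   256,   256 };

With this layout, ibmveth_open() sizes the receive queue as one slot per buffer plus one (rxq_entries starts at 1 and accumulates each pool's size), and ibmveth_probe() initializes every pool from the two arrays instead of three hardcoded calls.
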
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5617bec7fd53..d985b804a762 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -97,6 +97,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +182,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
         atomic_set(&pool->available, 0);
         pool->producer_index = 0;
         pool->consumer_index = 0;
+        pool->active = 0;
 
         return 0;
 }
@@ -258,9 +260,14 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 /* check if replenishing is needed. */
 static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
 {
-        return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
-                (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
-                (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
+        int i;
+
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                if(adapter->rx_buff_pool[i].active &&
+                   (atomic_read(&adapter->rx_buff_pool[i].available) <
+                    adapter->rx_buff_pool[i].threshold))
+                        return 1;
+        return 0;
 }
 
 /* kick the replenish tasklet if we need replenishing and it isn't already running */
@@ -275,11 +282,14 @@ static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter
 /* replenish tasklet routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
+        int i;
+
         adapter->replenish_task_cycles++;
 
-        ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-        ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-        ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                if(adapter->rx_buff_pool[i].active)
+                        ibmveth_replenish_buffer_pool(adapter,
+                                                      &adapter->rx_buff_pool[i]);
 
         adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
 
@@ -321,6 +331,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
                 kfree(pool->skbuff);
                 pool->skbuff = NULL;
         }
+        pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -379,6 +390,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
         ibmveth_assert(pool < IbmVethNumBufferPools);
         ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
 
+        if(!adapter->rx_buff_pool[pool].active) {
+                ibmveth_rxq_harvest_buffer(adapter);
+                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+                return;
+        }
+
         desc.desc = 0;
         desc.fields.valid = 1;
         desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
@@ -409,6 +426,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
+        int i;
+
         if(adapter->buffer_list_addr != NULL) {
                 if(!dma_mapping_error(adapter->buffer_list_dma)) {
                         dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +462,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
                 adapter->rx_queue.queue_addr = NULL;
         }
 
-        ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-        ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-        ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+        for(i = 0; i<IbmVethNumBufferPools; i++)
+                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
 {
         struct ibmveth_adapter *adapter = netdev->priv;
         u64 mac_address = 0;
-        int rxq_entries;
+        int rxq_entries = 1;
         unsigned long lpar_rc;
         int rc;
         union ibmveth_buf_desc rxq_desc;
+        int i;
 
         ibmveth_debug_printk("open starting\n");
 
-        rxq_entries =
-                adapter->rx_buff_pool[0].size +
-                adapter->rx_buff_pool[1].size +
-                adapter->rx_buff_pool[2].size + 1;
+        for(i = 0; i<IbmVethNumBufferPools; i++)
+                rxq_entries += adapter->rx_buff_pool[i].size;
 
         adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
         adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -502,14 +519,8 @@ static int ibmveth_open(struct net_device *netdev)
         adapter->rx_queue.num_slots = rxq_entries;
         adapter->rx_queue.toggle = 1;
 
-        if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
-           ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
-           ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
-        {
-                ibmveth_error_printk("unable to allocate buffer pools\n");
-                ibmveth_cleanup(adapter);
-                return -ENOMEM;
-        }
+        /* call change_mtu to init the buffer pools based in initial mtu */
+        ibmveth_change_mtu(netdev, netdev->mtu);
 
         memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
         mac_address = mac_address >> 16;
@@ -885,17 +896,52 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
-        if ((new_mtu < 68) || (new_mtu > (1<<20)))
+        struct ibmveth_adapter *adapter = dev->priv;
+        int i;
+        int prev_smaller = 1;
+
+        if ((new_mtu < 68) ||
+            (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
                 return -EINVAL;
+
+        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                int activate = 0;
+                if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
+                        activate = 1;
+                        prev_smaller= 1;
+                } else {
+                        if (prev_smaller)
+                                activate = 1;
+                        prev_smaller= 0;
+                }
+
+                if (activate && !adapter->rx_buff_pool[i].active) {
+                        struct ibmveth_buff_pool *pool =
+                                                &adapter->rx_buff_pool[i];
+                        if(ibmveth_alloc_buffer_pool(pool)) {
+                                ibmveth_error_printk("unable to alloc pool\n");
+                                return -ENOMEM;
+                        }
+                        adapter->rx_buff_pool[i].active = 1;
+                } else if (!activate && adapter->rx_buff_pool[i].active) {
+                        adapter->rx_buff_pool[i].active = 0;
+                        h_free_logical_lan_buffer(adapter->vdev->unit_address,
+                                                  (u64)pool_size[i]);
+                }
+
+        }
+
+
+        ibmveth_schedule_replenishing(adapter);
         dev->mtu = new_mtu;
         return 0;
 }
 
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
-        int rc;
+        int rc, i;
         struct net_device *netdev;
-        struct ibmveth_adapter *adapter;
+        struct ibmveth_adapter *adapter = NULL;
 
         unsigned char *mac_addr_p;
         unsigned int *mcastFilterSize_p;
@@ -965,9 +1011,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
         memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-        ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
-        ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
-        ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+        for(i = 0; i<IbmVethNumBufferPools; i++)
+                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+                                         pool_count[i], pool_size[i]);
 
         ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
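
The pool-activation rule in ibmveth_change_mtu() above is worth spelling out: every pool whose buffers are too small for the new MTU stays active (it still serves smaller packets), as does the first pool large enough to hold a full frame; all larger pools are deactivated and their buffers returned to the hypervisor. A standalone sketch of just that decision, reusing the assumed pool_size and IBMVETH_BUFF_OH values from the note above (not driver code, and not the committed defaults):

    /* Minimal sketch of the activation rule from ibmveth_change_mtu().
     * pool_size and IBMVETH_BUFF_OH are assumed values for illustration. */
    #include <stdio.h>

    #define IbmVethNumBufferPools 5
    #define IBMVETH_BUFF_OH       22

    static const int pool_size[IbmVethNumBufferPools] =
            { 512, 2048, 16384, 32768, 65536 };

    int main(void)
    {
            int new_mtu = 9000;          /* example: a jumbo-frame MTU */
            int i, prev_smaller = 1;

            for (i = 0; i < IbmVethNumBufferPools; i++) {
                    int activate = 0;
                    if (new_mtu > pool_size[i] - IBMVETH_BUFF_OH) {
                            /* buffers too small for a full frame: keep the
                             * pool active for smaller packets */
                            activate = 1;
                            prev_smaller = 1;
                    } else {
                            /* first pool big enough for the MTU stays
                             * active; every larger pool is deactivated */
                            if (prev_smaller)
                                    activate = 1;
                            prev_smaller = 0;
                    }
                    printf("pool %d (%5d bytes): %s\n", i, pool_size[i],
                           activate ? "active" : "inactive");
            }
            return 0;
    }

With these assumed sizes and an MTU of 9000, pools 0 through 2 come out active and pools 3 and 4 inactive, which is exactly the memory the old driver would have wasted by always mapping every pool.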