 drivers/net/ibmveth.c | 102 ++++++++++++++++++++++++++++++++-----------------
 drivers/net/ibmveth.h |  18 ++++++---
 2 files changed, 85 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5617bec7fd53..d985b804a762 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -97,6 +97,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +182,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
         atomic_set(&pool->available, 0);
         pool->producer_index = 0;
         pool->consumer_index = 0;
+        pool->active = 0;
 
         return 0;
 }
@@ -258,9 +260,14 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 /* check if replenishing is needed. */
 static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
 {
-        return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
-                (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
-                (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
+        int i;
+
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                if(adapter->rx_buff_pool[i].active &&
+                   (atomic_read(&adapter->rx_buff_pool[i].available) <
+                    adapter->rx_buff_pool[i].threshold))
+                        return 1;
+        return 0;
 }
 
 /* kick the replenish tasklet if we need replenishing and it isn't already running */
@@ -275,11 +282,14 @@ static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter
 /* replenish tasklet routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
+        int i;
+
         adapter->replenish_task_cycles++;
 
-        ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-        ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-        ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                if(adapter->rx_buff_pool[i].active)
+                        ibmveth_replenish_buffer_pool(adapter,
+                                                      &adapter->rx_buff_pool[i]);
 
         adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
 
@@ -321,6 +331,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
                 kfree(pool->skbuff);
                 pool->skbuff = NULL;
         }
+        pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -379,6 +390,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
         ibmveth_assert(pool < IbmVethNumBufferPools);
         ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
 
+        if(!adapter->rx_buff_pool[pool].active) {
+                ibmveth_rxq_harvest_buffer(adapter);
+                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+                return;
+        }
+
         desc.desc = 0;
         desc.fields.valid = 1;
         desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
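For context on the `pool` and `index` values asserted above: each buffer the driver hands to the hypervisor is tagged with a 64-bit correlator that packs the pool number into the top half and the buffer's slot into the bottom half. A minimal sketch of the decode, reconstructed from the driver's receive path rather than quoted from this patch:

        /* sketch: how ibmveth_rxq_recycle_buffer() recovers pool and index
         * from the correlator of the descriptor at the current rxq slot */
        u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
        unsigned int pool  = correlator >> 32;          /* which buffer pool */
        unsigned int index = correlator & 0xffffffffUL; /* slot within that pool */

This is why an inactive pool can still see buffers arrive here: buffers posted before the pool was deactivated eventually return through the receive queue, and the new branch harvests them and frees the pool instead of re-posting them.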
@@ -409,6 +426,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
+        int i;
+
         if(adapter->buffer_list_addr != NULL) {
                 if(!dma_mapping_error(adapter->buffer_list_dma)) {
                         dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +462,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
                 adapter->rx_queue.queue_addr = NULL;
         }
 
-        ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-        ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-        ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
 {
         struct ibmveth_adapter *adapter = netdev->priv;
         u64 mac_address = 0;
-        int rxq_entries;
+        int rxq_entries = 1;
         unsigned long lpar_rc;
         int rc;
         union ibmveth_buf_desc rxq_desc;
+        int i;
 
         ibmveth_debug_printk("open starting\n");
 
-        rxq_entries =
-                adapter->rx_buff_pool[0].size +
-                adapter->rx_buff_pool[1].size +
-                adapter->rx_buff_pool[2].size + 1;
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                rxq_entries += adapter->rx_buff_pool[i].size;
 
         adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
         adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
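Note the interplay between `rxq_entries = 1` and the loop above: the receive queue is sized for every pool's full count, whether or not the pool ends up active. A worked example with the new defaults from ibmveth.h (illustrative only, not part of the patch):

        /* pool_count[] = { 256, 768, 256, 256, 256 } */
        int rxq_entries = 1 + 256 + 768 + 256 + 256 + 256;  /* = 1793 slots */

Reserving slots for inactive pools presumably lets a later ibmveth_change_mtu() activate a pool without having to re-register a larger receive queue with the hypervisor.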
@@ -502,14 +519,8 @@ static int ibmveth_open(struct net_device *netdev)
         adapter->rx_queue.num_slots = rxq_entries;
         adapter->rx_queue.toggle = 1;
 
-        if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
-           ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
-           ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
-        {
-                ibmveth_error_printk("unable to allocate buffer pools\n");
-                ibmveth_cleanup(adapter);
-                return -ENOMEM;
-        }
+        /* call change_mtu to init the buffer pools based on initial mtu */
+        ibmveth_change_mtu(netdev, netdev->mtu);
 
         memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
         mac_address = mac_address >> 16;
@@ -885,17 +896,52 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
-        if ((new_mtu < 68) || (new_mtu > (1<<20)))
+        struct ibmveth_adapter *adapter = dev->priv;
+        int i;
+        int prev_smaller = 1;
+
+        if ((new_mtu < 68) ||
+            (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
                 return -EINVAL;
+
+        for(i = 0; i < IbmVethNumBufferPools; i++) {
+                int activate = 0;
+                if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
+                        activate = 1;
+                        prev_smaller = 1;
+                } else {
+                        if (prev_smaller)
+                                activate = 1;
+                        prev_smaller = 0;
+                }
+
+                if (activate && !adapter->rx_buff_pool[i].active) {
+                        struct ibmveth_buff_pool *pool =
+                                &adapter->rx_buff_pool[i];
+                        if(ibmveth_alloc_buffer_pool(pool)) {
+                                ibmveth_error_printk("unable to alloc pool\n");
+                                return -ENOMEM;
+                        }
+                        adapter->rx_buff_pool[i].active = 1;
+                } else if (!activate && adapter->rx_buff_pool[i].active) {
+                        adapter->rx_buff_pool[i].active = 0;
+                        h_free_logical_lan_buffer(adapter->vdev->unit_address,
+                                                  (u64)pool_size[i]);
+                }
+
+        }
+
+
+        ibmveth_schedule_replenishing(adapter);
         dev->mtu = new_mtu;
         return 0;
 }
 
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
-        int rc;
+        int rc, i;
         struct net_device *netdev;
-        struct ibmveth_adapter *adapter;
+        struct ibmveth_adapter *adapter = NULL;
 
         unsigned char *mac_addr_p;
         unsigned int *mcastFilterSize_p;
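The activation rule above is easy to misread, so a worked example (illustrative, derived from the logic in this hunk): every pool whose buffers are too small for the new MTU stays active, since it still serves smaller frames, and the first pool large enough to hold the MTU is activated as well; all larger pools are deactivated and their hypervisor buffers released.

        /* new_mtu = 9000, pool_size[] = { 512, 2048, 16384, 32768, 65536 },
         * IBMVETH_BUFF_OH = 22:
         *   pool 0:   512 - 22 <  9000            -> active (small frames)
         *   pool 1:  2048 - 22 <  9000            -> active
         *   pool 2: 16384 - 22 >= 9000, prev_smaller -> active (first fit)
         *   pool 3: prev_smaller == 0             -> inactive; buffers freed
         *                                            via h_free_logical_lan_buffer()
         *   pool 4: prev_smaller == 0             -> inactive
         */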
@@ -965,9 +1011,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
         memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-        ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
-        ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
-        ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+        for(i = 0; i < IbmVethNumBufferPools; i++)
+                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+                                         pool_count[i], pool_size[i]);
 
         ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 51a470da9686..a5d27a9cdf1f 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -49,6 +49,7 @@
 #define H_SEND_LOGICAL_LAN 0x120
 #define H_MULTICAST_CTRL 0x130
 #define H_CHANGE_LOGICAL_LAN_MAC 0x14C
+#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
 
 /* hcall macros */
 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
@@ -69,13 +70,15 @@
 #define h_change_logical_lan_mac(ua, mac) \
         plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
 
-#define IbmVethNumBufferPools 3
-#define IbmVethPool0DftSize (1024 * 2)
-#define IbmVethPool1DftSize (1024 * 4)
-#define IbmVethPool2DftSize (1024 * 10)
-#define IbmVethPool0DftCnt 256
-#define IbmVethPool1DftCnt 256
-#define IbmVethPool2DftCnt 256
+#define h_free_logical_lan_buffer(ua, bufsize) \
+        plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize)
+
+#define IbmVethNumBufferPools 5
+#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+
+/* pool_size should be sorted */
+static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
+static int pool_count[] = { 256, 768, 256, 256, 256 };
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
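Two consequences of these tables, derived from the definitions above (worked arithmetic, not quoted from the patch): the "pool_size should be sorted" comment is load-bearing, because the activation scan in ibmveth_change_mtu() assumes the first pool that fits the MTU is the smallest one that does; and the driver's upper MTU bound follows directly from the last entry:

        /* maximum MTU accepted by ibmveth_change_mtu() */
        int max_mtu = 1024 * 64 - IBMVETH_BUFF_OH;  /* 65536 - 22 = 65514 */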
@@ -90,6 +93,7 @@ struct ibmveth_buff_pool {
         u16 *free_map;
         dma_addr_t *dma_addr;
         struct sk_buff **skbuff;
+        int active;
 };
 
 struct ibmveth_rx_q {
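For reference, a sketch of struct ibmveth_buff_pool as it reads with this hunk applied; the fields above free_map are reconstructed from the rest of the driver rather than shown in this diff, so treat the exact types and order as assumptions:

        struct ibmveth_buff_pool {
                u32 index;           /* pool number, encoded into correlators */
                u32 buff_size;       /* size of each buffer in this pool */
                u32 threshold;       /* replenish when available drops below this */
                atomic_t available;  /* buffers currently posted to the hypervisor */
                u32 size;            /* total buffers in the pool */
                u32 producer_index;
                u32 consumer_index;
                u16 *free_map;
                dma_addr_t *dma_addr;
                struct sk_buff **skbuff;
                int active;          /* new: pool participates in replenishing */
        };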