Diffstat (limited to 'drivers/net/ibmveth.c')
 drivers/net/ibmveth.c | 291
 1 file changed, 208 insertions(+), 83 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 52d01027d9e7..666346f6469e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -24,7 +24,7 @@
 /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
 /* option of the RS/6000 Platform Architechture to interface with virtual */
 /* ethernet NICs that are presented to the partition by the hypervisor. */
 /* */
 /**************************************************************************/
 /*
 TODO:
@@ -79,7 +79,7 @@
 #else
 #define ibmveth_debug_printk_no_adapter(fmt, args...)
 #define ibmveth_debug_printk(fmt, args...)
 #define ibmveth_assert(expr)
 #endif

 static int ibmveth_open(struct net_device *dev);
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;

 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 }

 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
         pool->size = pool_size;
         pool->index = pool_index;
         pool->buff_size = buff_size;
         pool->threshold = pool_size / 2;
+        pool->active = pool_active;
 }

 /* allocate and setup an buffer pool - called during open */
@@ -146,13 +148,13 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 {
         int i;

         pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

         if(!pool->free_map) {
                 return -1;
         }

         pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
         if(!pool->dma_addr) {
                 kfree(pool->free_map);
                 pool->free_map = NULL;
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
         atomic_set(&pool->available, 0);
         pool->producer_index = 0;
         pool->consumer_index = 0;
-        pool->active = 0;

         return 0;
 }
@@ -214,7 +215,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc

                 free_index = pool->consumer_index++ % pool->size;
                 index = pool->free_map[free_index];

                 ibmveth_assert(index != IBM_VETH_INVALID_MAP);
                 ibmveth_assert(pool->skbuff[index] == NULL);

@@ -231,10 +232,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                 desc.desc = 0;
                 desc.fields.valid = 1;
                 desc.fields.length = pool->buff_size;
                 desc.fields.address = dma_addr;

                 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

                 if(lpar_rc != H_SUCCESS) {
                         pool->free_map[free_index] = index;
                         pool->skbuff[index] = NULL;
@@ -250,13 +251,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                         adapter->replenish_add_buff_success++;
                 }
         }

         mb();
         atomic_add(buffers_added, &(pool->available));
 }

 /* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
         int i;

@@ -264,7 +265,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)

         for(i = 0; i < IbmVethNumBufferPools; i++)
                 if(adapter->rx_buff_pool[i].active)
                         ibmveth_replenish_buffer_pool(adapter,
                                                       &adapter->rx_buff_pool[i]);

         adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
                 kfree(pool->skbuff);
                 pool->skbuff = NULL;
         }
-        pool->active = 0;
 }

 /* remove a buffer from a pool */
@@ -372,7 +372,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
         desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

         lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

         if(lpar_rc != H_SUCCESS) {
                 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
                 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
@@ -407,7 +407,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
                 }
                 free_page((unsigned long)adapter->buffer_list_addr);
                 adapter->buffer_list_addr = NULL;
         }

         if(adapter->filter_list_addr != NULL) {
                 if(!dma_mapping_error(adapter->filter_list_dma)) {
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
         }

         for(i = 0; i<IbmVethNumBufferPools; i++)
-                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+                if (adapter->rx_buff_pool[i].active)
+                        ibmveth_free_buffer_pool(adapter,
+                                                 &adapter->rx_buff_pool[i]);
 }

 static int ibmveth_open(struct net_device *netdev)
@@ -450,10 +452,10 @@ static int ibmveth_open(struct net_device *netdev)

         for(i = 0; i<IbmVethNumBufferPools; i++)
                 rxq_entries += adapter->rx_buff_pool[i].size;

         adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
         adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

         if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
                 ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
                 ibmveth_cleanup(adapter);
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_device *netdev)
         adapter->rx_queue.num_slots = rxq_entries;
         adapter->rx_queue.toggle = 1;

-        /* call change_mtu to init the buffer pools based in initial mtu */
-        ibmveth_change_mtu(netdev, netdev->mtu);
-
         memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
         mac_address = mac_address >> 16;

@@ -504,7 +503,7 @@ static int ibmveth_open(struct net_device *netdev)
         ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
         ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);


         lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
                                          adapter->buffer_list_dma,
                                          rxq_desc.desc,
@@ -519,7 +518,18 @@ static int ibmveth_open(struct net_device *netdev)
                                      rxq_desc.desc,
                                      mac_address);
                 ibmveth_cleanup(adapter);
                 return -ENONET;
+        }
+
+        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                if(!adapter->rx_buff_pool[i].active)
+                        continue;
+                if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+                        ibmveth_error_printk("unable to alloc pool\n");
+                        adapter->rx_buff_pool[i].active = 0;
+                        ibmveth_cleanup(adapter);
+                        return -ENOMEM ;
+                }
         }

         ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
@@ -547,10 +557,11 @@ static int ibmveth_close(struct net_device *netdev)
 {
         struct ibmveth_adapter *adapter = netdev->priv;
         long lpar_rc;

         ibmveth_debug_printk("close starting\n");

-        netif_stop_queue(netdev);
+        if (!adapter->pool_config)
+                netif_stop_queue(netdev);

         free_irq(netdev->irq, netdev);

@@ -694,7 +705,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                                              desc[5].desc,
                                              correlator);
         } while ((lpar_rc == H_BUSY) && (retry_count--));

         if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
                 int i;
                 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
@@ -780,7 +791,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
                 /* more work to do - return that we are not done yet */
                 netdev->quota -= frames_processed;
                 *budget -= frames_processed;
                 return 1;
         }

         /* we think we are done - reenable interrupts, then check once more to make sure we are done */
@@ -806,7 +817,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
 }

 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
         struct net_device *netdev = dev_instance;
         struct ibmveth_adapter *adapter = netdev->priv;
         unsigned long lpar_rc;
@@ -862,7 +873,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
                                 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
                         }
                 }

                 /* re-enable filtering */
                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                            IbmVethMcastEnableFiltering,
@@ -876,46 +887,22 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
         struct ibmveth_adapter *adapter = dev->priv;
+        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
         int i;
-        int prev_smaller = 1;

-        if ((new_mtu < 68) ||
-            (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
+        if (new_mtu < IBMVETH_MAX_MTU)
                 return -EINVAL;

+        /* Look for an active buffer pool that can hold the new MTU */
         for(i = 0; i<IbmVethNumBufferPools; i++) {
-                int activate = 0;
-                if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
-                        activate = 1;
-                        prev_smaller= 1;
-                } else {
-                        if (prev_smaller)
-                                activate = 1;
-                        prev_smaller= 0;
+                if (!adapter->rx_buff_pool[i].active)
+                        continue;
+                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+                        dev->mtu = new_mtu;
+                        return 0;
                 }
-
-                if (activate && !adapter->rx_buff_pool[i].active) {
-                        struct ibmveth_buff_pool *pool =
-                                &adapter->rx_buff_pool[i];
-                        if(ibmveth_alloc_buffer_pool(pool)) {
-                                ibmveth_error_printk("unable to alloc pool\n");
-                                return -ENOMEM;
-                        }
-                        adapter->rx_buff_pool[i].active = 1;
-                } else if (!activate && adapter->rx_buff_pool[i].active) {
-                        adapter->rx_buff_pool[i].active = 0;
-                        h_free_logical_lan_buffer(adapter->vdev->unit_address,
-                                                  (u64)pool_size[i]);
-                }
-
         }
-
-        /* kick the interrupt handler so that the new buffer pools get
-           replenished or deallocated */
-        ibmveth_interrupt(dev->irq, dev, NULL);
-
-        dev->mtu = new_mtu;
-        return 0;
+        return -EINVAL;
 }

 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -928,7 +915,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
         unsigned int *mcastFilterSize_p;


         ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
                                         dev->unit_address);

         mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
@@ -937,7 +924,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
                        "attribute\n", __FILE__, __LINE__);
                 return 0;
         }

         mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
         if(!mcastFilterSize_p) {
                 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
@@ -945,7 +932,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
                        __FILE__, __LINE__);
                 return 0;
         }

         netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

         if(!netdev)
@@ -960,13 +947,14 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
         adapter->vdev = dev;
         adapter->netdev = netdev;
         adapter->mcastFilterSize= *mcastFilterSize_p;
-
+        adapter->pool_config = 0;
+
         /* Some older boxes running PHYP non-natively have an OF that
            returns a 8-byte local-mac-address field (and the first
            2 bytes have to be ignored) while newer boxes' OF return
            a 6-byte field. Note that IEEE 1275 specifies that
            local-mac-address must be a 6-byte field.
            The RPA doc specifies that the first byte must be 10b, so
            we'll just look for it to solve this 8 vs. 6 byte field issue */

         if ((*mac_addr_p & 0x3) != 0x02)
@@ -976,7 +964,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
         memcpy(&adapter->mac_addr, mac_addr_p, 6);

         adapter->liobn = dev->iommu_table->it_index;

         netdev->irq = dev->irq;
         netdev->open = ibmveth_open;
         netdev->poll = ibmveth_poll;
@@ -989,14 +977,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
         netdev->ethtool_ops = &netdev_ethtool_ops;
         netdev->change_mtu = ibmveth_change_mtu;
         SET_NETDEV_DEV(netdev, &dev->dev);
         netdev->features |= NETIF_F_LLTX;
         spin_lock_init(&adapter->stats_lock);

         memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

-        for(i = 0; i<IbmVethNumBufferPools; i++)
-                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
-                                         pool_count[i], pool_size[i]);
+        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+                                         pool_count[i], pool_size[i],
+                                         pool_active[i]);
+                kobj->parent = &dev->dev.kobj;
+                sprintf(kobj->name, "pool%d", i);
+                kobj->ktype = &ktype_veth_pool;
+                kobject_register(kobj);
+        }

         ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

@@ -1025,6 +1020,10 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 {
         struct net_device *netdev = dev->dev.driver_data;
         struct ibmveth_adapter *adapter = netdev->priv;
+        int i;
+
+        for(i = 0; i<IbmVethNumBufferPools; i++)
+                kobject_unregister(&adapter->rx_buff_pool[i].kobj);

         unregister_netdev(netdev);

@@ -1048,7 +1047,7 @@ static void ibmveth_proc_unregister_driver(void)
         remove_proc_entry(IBMVETH_PROC_DIR, NULL);
 }

 static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
 {
         if (*pos == 0) {
                 return (void *)1;
@@ -1063,18 +1062,18 @@ static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
         return NULL;
 }

 static void ibmveth_seq_stop(struct seq_file *seq, void *v)
 {
 }

 static int ibmveth_seq_show(struct seq_file *seq, void *v)
 {
         struct ibmveth_adapter *adapter = seq->private;
         char *current_mac = ((char*) &adapter->netdev->dev_addr);
         char *firmware_mac = ((char*) &adapter->mac_addr) ;

         seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

         seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
         seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
         seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -1083,7 +1082,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
                    firmware_mac[0], firmware_mac[1], firmware_mac[2],
                    firmware_mac[3], firmware_mac[4], firmware_mac[5]);

         seq_printf(seq, "\nAdapter Statistics:\n");
         seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
         seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
@@ -1095,7 +1094,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
         seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
         seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);

         return 0;
 }
 static struct seq_operations ibmveth_seq_ops = {
@@ -1153,11 +1152,11 @@ static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 }

 #else /* CONFIG_PROC_FS */
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 {
 }

 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 {
 }
 static void ibmveth_proc_register_driver(void)
@@ -1169,6 +1168,132 @@ static void ibmveth_proc_unregister_driver(void)
 }
 #endif /* CONFIG_PROC_FS */

+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject * kobj,
+                              struct attribute * attr, char * buf)
+{
+        struct ibmveth_buff_pool *pool = container_of(kobj,
+                                                      struct ibmveth_buff_pool,
+                                                      kobj);
+
+        if (attr == &veth_active_attr)
+                return sprintf(buf, "%d\n", pool->active);
+        else if (attr == &veth_num_attr)
+                return sprintf(buf, "%d\n", pool->size);
+        else if (attr == &veth_size_attr)
+                return sprintf(buf, "%d\n", pool->buff_size);
+        return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
+                               const char * buf, size_t count)
+{
+        struct ibmveth_buff_pool *pool = container_of(kobj,
+                                                      struct ibmveth_buff_pool,
+                                                      kobj);
+        struct net_device *netdev =
+            container_of(kobj->parent, struct device, kobj)->driver_data;
+        struct ibmveth_adapter *adapter = netdev->priv;
+        long value = simple_strtol(buf, NULL, 10);
+        long rc;
+
+        if (attr == &veth_active_attr) {
+                if (value && !pool->active) {
+                        if(ibmveth_alloc_buffer_pool(pool)) {
+                                ibmveth_error_printk("unable to alloc pool\n");
+                                return -ENOMEM;
+                        }
+                        pool->active = 1;
+                        adapter->pool_config = 1;
+                        ibmveth_close(netdev);
+                        adapter->pool_config = 0;
+                        if ((rc = ibmveth_open(netdev)))
+                                return rc;
+                } else if (!value && pool->active) {
+                        int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+                        int i;
+                        /* Make sure there is a buffer pool with buffers that
+                           can hold a packet of the size of the MTU */
+                        for(i = 0; i<IbmVethNumBufferPools; i++) {
+                                if (pool == &adapter->rx_buff_pool[i])
+                                        continue;
+                                if (!adapter->rx_buff_pool[i].active)
+                                        continue;
+                                if (mtu < adapter->rx_buff_pool[i].buff_size) {
+                                        pool->active = 0;
+                                        h_free_logical_lan_buffer(adapter->
+                                                                  vdev->
+                                                                  unit_address,
+                                                                  pool->
+                                                                  buff_size);
+                                }
+                        }
+                        if (pool->active) {
+                                ibmveth_error_printk("no active pool >= MTU\n");
+                                return -EPERM;
+                        }
+                }
+        } else if (attr == &veth_num_attr) {
+                if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+                        return -EINVAL;
+                else {
+                        adapter->pool_config = 1;
+                        ibmveth_close(netdev);
+                        adapter->pool_config = 0;
+                        pool->size = value;
+                        if ((rc = ibmveth_open(netdev)))
+                                return rc;
+                }
+        } else if (attr == &veth_size_attr) {
+                if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+                        return -EINVAL;
+                else {
+                        adapter->pool_config = 1;
+                        ibmveth_close(netdev);
+                        adapter->pool_config = 0;
+                        pool->buff_size = value;
+                        if ((rc = ibmveth_open(netdev)))
+                                return rc;
+                }
+        }
+
+        /* kick the interrupt handler to allocate/deallocate pools */
+        ibmveth_interrupt(netdev->irq, netdev, NULL);
+        return count;
+}
+
+
+#define ATTR(_name, _mode)                              \
+        struct attribute veth_##_name##_attr = {        \
+        .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
+        };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute * veth_pool_attrs[] = {
+        &veth_active_attr,
+        &veth_num_attr,
+        &veth_size_attr,
+        NULL,
+};
+
+static struct sysfs_ops veth_pool_ops = {
+        .show = veth_pool_show,
+        .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+        .release = NULL,
+        .sysfs_ops = &veth_pool_ops,
+        .default_attrs = veth_pool_attrs,
+};
+
+
 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
         { "network", "IBM,l-lan"},
         { "", "" }
@@ -1198,7 +1323,7 @@ static void __exit ibmveth_module_exit(void)
 {
         vio_unregister_driver(&ibmveth_driver);
         ibmveth_proc_unregister_driver();
 }

 module_init(ibmveth_module_init);
 module_exit(ibmveth_module_exit);
