Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--  drivers/net/ibmveth.c | 219
1 file changed, 164 insertions(+), 55 deletions(-)

diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..a03fe1fb61ca 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -52,7 +53,9 @@
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
+#include <asm/iommu.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
 #include <linux/seq_file.h>
 
 #include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 static struct kobj_type ktype_veth_pool;
 
+
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "ibmveth"
 static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
        u32 i;
        u32 count = pool->size - atomic_read(&pool->available);
        u32 buffers_added = 0;
+       struct sk_buff *skb;
+       unsigned int free_index, index;
+       u64 correlator;
+       unsigned long lpar_rc;
+       dma_addr_t dma_addr;
 
        mb();
 
        for(i = 0; i < count; ++i) {
-               struct sk_buff *skb;
-               unsigned int free_index, index;
-               u64 correlator;
                union ibmveth_buf_desc desc;
-               unsigned long lpar_rc;
-               dma_addr_t dma_addr;
 
                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);
 
+               if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+                       goto failure;
+
                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->dma_addr[index] = dma_addr;
                pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-               if(lpar_rc != H_SUCCESS) {
-                       pool->free_map[free_index] = index;
-                       pool->skbuff[index] = NULL;
-                       if (pool->consumer_index == 0)
-                               pool->consumer_index = pool->size - 1;
-                       else
-                               pool->consumer_index--;
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       pool->dma_addr[index], pool->buff_size,
-                                       DMA_FROM_DEVICE);
-                       dev_kfree_skb_any(skb);
-                       adapter->replenish_add_buff_failure++;
-                       break;
-               } else {
+               if (lpar_rc != H_SUCCESS)
+                       goto failure;
+               else {
                        buffers_added++;
                        adapter->replenish_add_buff_success++;
                }
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
        mb();
        atomic_add(buffers_added, &(pool->available));
+       return;
+
+failure:
+       pool->free_map[free_index] = index;
+       pool->skbuff[index] = NULL;
+       if (pool->consumer_index == 0)
+               pool->consumer_index = pool->size - 1;
+       else
+               pool->consumer_index--;
+       if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+               dma_unmap_single(&adapter->vdev->dev,
+                                pool->dma_addr[index], pool->buff_size,
+                                DMA_FROM_DEVICE);
+       dev_kfree_skb_any(skb);
+       adapter->replenish_add_buff_failure++;
+
+       mb();
+       atomic_add(buffers_added, &(pool->available));
 }
 
 /* replenish routine */
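
The two hunks above replace per-failure cleanup with a single failure: label: both a dma_map_single() failure and an unsuccessful h_add_logical_lan_buffer() hcall now unwind through the same path, and dma_mapping_error() tells that path whether there is actually a mapping to tear down. A minimal sketch of that shape follows; struct pool and hand_to_hypervisor() are illustrative stand-ins, not the driver's own types or calls.

/* Sketch of a consolidated goto-failure unwind, assuming kernel context.
 * struct pool and hand_to_hypervisor() are hypothetical stand-ins. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct pool { unsigned int buff_size; };

/* stand-in for h_add_logical_lan_buffer(); pretend it can fail */
static int hand_to_hypervisor(dma_addr_t dma) { return 0; }

static int produce_buffer(struct device *dev, struct pool *p)
{
        struct sk_buff *skb = alloc_skb(p->buff_size, GFP_ATOMIC);
        dma_addr_t dma;

        if (!skb)
                return -ENOMEM;

        dma = dma_map_single(dev, skb->data, p->buff_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma))
                goto failure;           /* nothing mapped yet */

        if (hand_to_hypervisor(dma))
                goto failure;           /* mapped: label must unmap */

        return 0;

failure:
        /* dma_mapping_error() distinguishes the two entry paths */
        if (!dma_mapping_error(dev, dma))
                dma_unmap_single(dev, dma, p->buff_size, DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
        return -EIO;
}
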
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 
        adapter->replenish_task_cycles++;
 
-       for(i = 0; i < IbmVethNumBufferPools; i++)
+       for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
                if(adapter->rx_buff_pool[i].active)
                        ibmveth_replenish_buffer_pool(adapter,
                                                     &adapter->rx_buff_pool[i]);
@@ -433,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
        int i;
+       struct device *dev = &adapter->vdev->dev;
 
        if(adapter->buffer_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->buffer_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->buffer_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+                       dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
@@ -446,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if(adapter->filter_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->filter_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->filter_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+                       dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
@@ -457,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if(adapter->rx_queue.queue_addr != NULL) {
-               if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
+               if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+                       dma_unmap_single(dev,
                                        adapter->rx_queue.queue_dma,
                                        adapter->rx_queue.queue_len,
                                        DMA_BIDIRECTIONAL);
@@ -472,6 +486,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_free_buffer_pool(adapter,
                                                 &adapter->rx_buff_pool[i]);
+
+       if (adapter->bounce_buffer != NULL) {
+               if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+                       dma_unmap_single(&adapter->vdev->dev,
+                                       adapter->bounce_buffer_dma,
+                                       adapter->netdev->mtu + IBMVETH_BUFF_OH,
+                                       DMA_BIDIRECTIONAL);
+                       adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+               }
+               kfree(adapter->bounce_buffer);
+               adapter->bounce_buffer = NULL;
+       }
 }
 
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -508,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
+       struct device *dev;
 
        ibmveth_debug_printk("open starting\n");
 
@@ -536,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
                return -ENOMEM;
        }
 
-       adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+       dev = &adapter->vdev->dev;
+
+       adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->rx_queue.queue_dma = dma_map_single(dev,
                        adapter->rx_queue.queue_addr,
                        adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-       if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-          (dma_mapping_error(adapter->filter_list_dma)) ||
-          (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+       if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+           (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+           (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
                ibmveth_error_printk("unable to map filter or buffer list pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
@@ -607,6 +636,24 @@ static int ibmveth_open(struct net_device *netdev)
                return rc;
        }
 
+       adapter->bounce_buffer =
+           kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+       if (!adapter->bounce_buffer) {
+               ibmveth_error_printk("unable to allocate bounce buffer\n");
+               ibmveth_cleanup(adapter);
+               napi_disable(&adapter->napi);
+               return -ENOMEM;
+       }
+       adapter->bounce_buffer_dma =
+           dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+                          netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+               ibmveth_error_printk("unable to map bounce buffer\n");
+               ibmveth_cleanup(adapter);
+               napi_disable(&adapter->napi);
+               return -ENOMEM;
+       }
+
        ibmveth_debug_printk("initial replenish cycle\n");
        ibmveth_interrupt(netdev->irq, netdev);
 
@@ -853,10 +900,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned int tx_packets = 0;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;
+       int used_bounce = 0;
+       unsigned long data_dma_addr;
 
        desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-       desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-                                            skb->len, DMA_TO_DEVICE);
+       data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+                                      skb->len, DMA_TO_DEVICE);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +924,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                buf[1] = 0;
        }
 
-       if (dma_mapping_error(desc.fields.address)) {
-               ibmveth_error_printk("tx: unable to map xmit buffer\n");
+       if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
+               if (!firmware_has_feature(FW_FEATURE_CMO))
+                       ibmveth_error_printk("tx: unable to map xmit buffer\n");
+               skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+                                         skb->len);
+               desc.fields.address = adapter->bounce_buffer_dma;
                tx_map_failed++;
-               tx_dropped++;
-               goto out;
-       }
+               used_bounce = 1;
+       } else
+               desc.fields.address = data_dma_addr;
 
        /* send the frame. Arbitrarily set retrycount to 1024 */
        correlator = 0;
@@ -904,8 +957,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                netdev->trans_start = jiffies;
        }
 
-       dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
-                        skb->len, DMA_TO_DEVICE);
+       if (!used_bounce)
+               dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+                                skb->len, DMA_TO_DEVICE);
 
 out:   spin_lock_irqsave(&adapter->stats_lock, flags);
        netdev->stats.tx_dropped += tx_dropped;
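
The transmit hunks above amount to a bounce-buffer fallback: under CMO (Cooperative Memory Overcommit) firmware a dma_map_single() failure becomes an expected event rather than a hard error, so the frame is copied into the permanently mapped bounce buffer set up in ibmveth_open() instead of being dropped, and only genuine per-frame mappings are torn down afterwards. A hedged standalone sketch of the pattern follows; send_hw() is a hypothetical stand-in for the h_send_logical_lan() hcall path, not the driver's API.

/* Sketch of the tx bounce-buffer fallback; send_hw() is hypothetical. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static void send_hw(dma_addr_t addr, unsigned int len) { }  /* stand-in hcall */

static int xmit_with_bounce(struct device *dev, struct sk_buff *skb,
                            void *bounce_buf, dma_addr_t bounce_dma)
{
        dma_addr_t addr;
        int used_bounce = 0;

        addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                /* Expected under CMO: copy into the preallocated,
                 * permanently mapped bounce buffer instead of dropping. */
                skb_copy_from_linear_data(skb, bounce_buf, skb->len);
                addr = bounce_dma;
                used_bounce = 1;
        }

        send_hw(addr, skb->len);

        /* The bounce buffer stays mapped for the device's lifetime,
         * so only a genuine per-frame mapping is torn down here. */
        if (!used_bounce)
                dma_unmap_single(dev, addr, skb->len, DMA_TO_DEVICE);
        return 0;
}
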
@@ -1053,9 +1107,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct ibmveth_adapter *adapter = dev->priv;
+       struct vio_dev *viodev = adapter->vdev;
        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-       int reinit = 0;
-       int i, rc;
+       int i;
 
        if (new_mtu < IBMVETH_MAX_MTU)
                return -EINVAL;
@@ -1067,23 +1121,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        if (i == IbmVethNumBufferPools)
                return -EINVAL;
 
+       /* Deactivate all the buffer pools so that the next loop can activate
+          only the buffer pools necessary to hold the new MTU */
+       for (i = 0; i < IbmVethNumBufferPools; i++)
+               if (adapter->rx_buff_pool[i].active) {
+                       ibmveth_free_buffer_pool(adapter,
+                                                &adapter->rx_buff_pool[i]);
+                       adapter->rx_buff_pool[i].active = 0;
+               }
+
        /* Look for an active buffer pool that can hold the new MTU */
        for(i = 0; i<IbmVethNumBufferPools; i++) {
-               if (!adapter->rx_buff_pool[i].active) {
-                       adapter->rx_buff_pool[i].active = 1;
-                       reinit = 1;
-               }
+               adapter->rx_buff_pool[i].active = 1;
 
                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-                       if (reinit && netif_running(adapter->netdev)) {
+                       if (netif_running(adapter->netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(adapter->netdev);
                                adapter->pool_config = 0;
                                dev->mtu = new_mtu;
-                               if ((rc = ibmveth_open(adapter->netdev)))
-                                       return rc;
-                       } else
-                               dev->mtu = new_mtu;
+                               vio_cmo_set_dev_desired(viodev,
+                                               ibmveth_get_desired_dma
+                                               (viodev));
+                               return ibmveth_open(adapter->netdev);
+                       }
+                       dev->mtu = new_mtu;
+                       vio_cmo_set_dev_desired(viodev,
+                                               ibmveth_get_desired_dma
+                                               (viodev));
                        return 0;
                }
        }
@@ -1098,6 +1163,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
 }
 #endif
 
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *     Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+       struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+       struct ibmveth_adapter *adapter;
+       unsigned long ret;
+       int i;
+       int rxqentries = 1;
+
+       /* netdev inits at probe time along with the structures we need below*/
+       if (netdev == NULL)
+               return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+       adapter = netdev_priv(netdev);
+
+       ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+       ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+       for (i = 0; i < IbmVethNumBufferPools; i++) {
+               /* add the size of the active receive buffers */
+               if (adapter->rx_buff_pool[i].active)
+                       ret +=
+                           adapter->rx_buff_pool[i].size *
+                           IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+                                            buff_size);
+               rxqentries += adapter->rx_buff_pool[i].size;
+       }
+       /* add the size of the receive queue entries */
+       ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+       return ret;
+}
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
        int rc, i;
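
For a feel for the entitlement figure ibmveth_get_desired_dma() reports to the vio bus, here is a back-of-envelope model as plain userspace C. The 4 KiB IOMMU page, the single active 512-buffer / 2 KiB pool, and the 16-byte receive-queue entry are assumptions chosen for the example, not values taken from this patch; only the 4096-byte buffer and filter lists are visible in the hunks above.

/* Back-of-envelope model of ibmveth_get_desired_dma(); all pool
 * geometry below is assumed for illustration, not read from the patch. */
#include <stdio.h>

#define IOMMU_PAGE      4096UL          /* assumed 4 KiB IOMMU page */
#define ALIGN_UP(x)     (((x) + IOMMU_PAGE - 1) & ~(IOMMU_PAGE - 1))

int main(void)
{
        unsigned long mtu = 1500;
        unsigned long pool_size = 512, buff_size = 2048; /* assumed one active pool */
        unsigned long rxq_entry = 16;   /* assumed sizeof(struct ibmveth_rx_q_entry) */
        unsigned long ret;

        ret  = 4096 + 4096;                     /* buffer list + filter list pages */
        ret += ALIGN_UP(mtu);                   /* room for one MTU-sized frame */
        ret += pool_size * ALIGN_UP(buff_size); /* each rx buffer maps a full page */
        ret += ALIGN_UP((1 + pool_size) * rxq_entry); /* receive queue entries */

        printf("desired DMA: %lu bytes (~%.2f MiB)\n",
               ret, ret / (1024.0 * 1024.0));   /* ~2121728 bytes, ~2.02 MiB */
        return 0;
}
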
@@ -1242,6 +1347,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
        ibmveth_proc_unregister_adapter(adapter);
 
        free_netdev(netdev);
+       dev_set_drvdata(&dev->dev, NULL);
+
        return 0;
 }
 
@@ -1402,14 +1509,15 @@ const char * buf, size_t count)
                                return -EPERM;
                        }
 
-                       pool->active = 0;
                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
+                               pool->active = 0;
                                adapter->pool_config = 0;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        }
+                       pool->active = 0;
                }
        } else if (attr == &veth_num_attr) {
                if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
@@ -1485,6 +1593,7 @@ static struct vio_driver ibmveth_driver = {
        .id_table       = ibmveth_device_table,
        .probe          = ibmveth_probe,
        .remove         = ibmveth_remove,
+       .get_desired_dma = ibmveth_get_desired_dma,
        .driver = {
                .name   = ibmveth_driver_name,
                .owner  = THIS_MODULE,
