aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ibmveth.h
diff options
context:
space:
mode:
authorSantiago Leon <santil@us.ibm.com>2006-04-25 12:19:59 -0400
committerJeff Garzik <jeff@garzik.org>2006-05-24 01:30:37 -0400
commit860f242eb5340d0b0cfe243cb86b2a98f92e8b91 (patch)
tree286d64b4acfc392bcb926a6f5f7bfb311b0d3efc /drivers/net/ibmveth.h
parent7b32a312895c00ff03178e49db8b651ee1e48178 (diff)
[PATCH] ibmveth change buffer pools dynamically
This patch provides a sysfs interface to change some properties of the ibmveth buffer pools (size of the buffers, number of buffers per pool, and whether a pool is active). Ethernet drivers use ethtool to provide this type of functionality. However, the buffers in the ibmveth driver can have an arbitrary size (not only regular, mini, and jumbo, which are the only sizes that ethtool can change), and ibmveth can also have an arbitrary number of buffer pools. Under heavy load we have seen dropped packets, which obviously kills TCP performance. We have created several fixes that mitigate this issue, but we definitely need a way of changing the number of buffers for an adapter dynamically. Also, changing the size of the buffers allows users to change the MTU to something big (bigger than a jumbo frame), greatly improving performance on partition-to-partition transfers. The patch creates directories pool1...pool4 in the device directory in sysfs, each with files: num, size, and active (which default to the values in the mainline version). Comments and suggestions are welcome... -- Santiago A. Leon Power Linux Development IBM Linux Technology Center Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ibmveth.h')
-rw-r--r--drivers/net/ibmveth.h7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 46919a814fca..b526dda06900 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -75,10 +75,13 @@
 
 #define IbmVethNumBufferPools 5
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
-/* pool_size should be sorted */
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
 static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
@@ -94,6 +97,7 @@ struct ibmveth_buff_pool {
     dma_addr_t *dma_addr;
     struct sk_buff **skbuff;
     int active;
+    struct kobject kobj;
 };
 
 struct ibmveth_rx_q {
@@ -118,6 +122,7 @@ struct ibmveth_adapter {
     dma_addr_t filter_list_dma;
     struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
     struct ibmveth_rx_q rx_queue;
+    int pool_config;
 
     /* adapter specific stats */
     u64 replenish_task_cycles;