about summary refs log tree commit diff stats
path: root/drivers/net/ibmveth.c
diff options
context:
space:
mode:
author    Santiago Leon <santil@linux.vnet.ibm.com>  2010-09-03 14:28:41 -0400
committer David S. Miller <davem@davemloft.net>      2010-09-06 21:21:49 -0400
commit    0c26b6775f36ce447722e8752bc3a006ec832df3 (patch)
tree      7c8a4b5b268fd3898d9f008c0aa38882e0ae32cc /drivers/net/ibmveth.c
parent    6e8ab30ec677925e8999a9f5bdb028736d22d48c (diff)
ibmveth: Add optional flush of rx buffer
On some machines we can improve the bandwidth by ensuring rx buffers are
not in the cache. Add a module option, disabled by default, that flushes
rx buffers on insertion.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--  drivers/net/ibmveth.c | 21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index d8a89846c6a8..1685e230a389 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -127,6 +127,10 @@ module_param(rx_copybreak, uint, 0644);
 MODULE_PARM_DESC(rx_copybreak,
 	"Maximum size of packet that is copied to a new buffer on receive");
 
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
 struct ibmveth_stat {
 	char name[ETH_GSTRING_LEN];
 	int offset;
@@ -234,6 +238,14 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 	return 0;
 }
 
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+	unsigned long offset;
+
+	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
 /* replenish the buffers for a pool. note that we don't need to
  * skb_reserve these since they are used for incoming...
  */
@@ -286,6 +298,12 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 		desc.fields.address = dma_addr;
 
+		if (rx_flush) {
+			unsigned int len = min(pool->buff_size,
+						adapter->netdev->mtu +
+						IBMVETH_BUFF_OH);
+			ibmveth_flush_buffer(skb->data, len);
+		}
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
 		if (lpar_rc != H_SUCCESS)
@@ -1095,6 +1113,9 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 				skb_copy_to_linear_data(new_skb,
 							skb->data + offset,
 							length);
+				if (rx_flush)
+					ibmveth_flush_buffer(skb->data,
+						length + offset);
 				skb = new_skb;
 				ibmveth_rxq_recycle_buffer(adapter);
 			} else {