aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorSantiago Leon <santil@linux.vnet.ibm.com>2010-09-03 14:29:25 -0400
committerDavid S. Miller <davem@davemloft.net>2010-09-06 21:21:53 -0400
commit517e80e6786974651d460a11bb066eab2628ddf1 (patch)
tree26af62f72e53afa69ce4ae0ea9c964533fa31c94 /drivers/net
parent21c2decea0f52980a34c79167fe69df3a84d2788 (diff)
ibmveth: Some formatting fixes
IbmVethNumBufferPools -> IBMVETH_NUM_BUFF_POOLS. Also change IBMVETH_MAX_MTU -> IBMVETH_MIN_MTU, since it refers to the minimum size, not the maximum. Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ibmveth.c32
-rw-r--r--drivers/net/ibmveth.h6
2 files changed, 19 insertions, 19 deletions
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 98873678e597..e608ee8b5105 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -312,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
312 312
313 adapter->replenish_task_cycles++; 313 adapter->replenish_task_cycles++;
314 314
315 for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) { 315 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
316 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; 316 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
317 317
318 if (pool->active && 318 if (pool->active &&
@@ -364,7 +364,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
364 unsigned int free_index; 364 unsigned int free_index;
365 struct sk_buff *skb; 365 struct sk_buff *skb;
366 366
367 ibmveth_assert(pool < IbmVethNumBufferPools); 367 ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
368 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 368 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
369 369
370 skb = adapter->rx_buff_pool[pool].skbuff[index]; 370 skb = adapter->rx_buff_pool[pool].skbuff[index];
@@ -397,7 +397,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
397 unsigned int pool = correlator >> 32; 397 unsigned int pool = correlator >> 32;
398 unsigned int index = correlator & 0xffffffffUL; 398 unsigned int index = correlator & 0xffffffffUL;
399 399
400 ibmveth_assert(pool < IbmVethNumBufferPools); 400 ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
401 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 401 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
402 402
403 return adapter->rx_buff_pool[pool].skbuff[index]; 403 return adapter->rx_buff_pool[pool].skbuff[index];
@@ -413,7 +413,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
413 union ibmveth_buf_desc desc; 413 union ibmveth_buf_desc desc;
414 unsigned long lpar_rc; 414 unsigned long lpar_rc;
415 415
416 ibmveth_assert(pool < IbmVethNumBufferPools); 416 ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
417 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 417 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
418 418
419 if(!adapter->rx_buff_pool[pool].active) { 419 if(!adapter->rx_buff_pool[pool].active) {
@@ -487,7 +487,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
487 adapter->rx_queue.queue_addr = NULL; 487 adapter->rx_queue.queue_addr = NULL;
488 } 488 }
489 489
490 for(i = 0; i<IbmVethNumBufferPools; i++) 490 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
491 if (adapter->rx_buff_pool[i].active) 491 if (adapter->rx_buff_pool[i].active)
492 ibmveth_free_buffer_pool(adapter, 492 ibmveth_free_buffer_pool(adapter,
493 &adapter->rx_buff_pool[i]); 493 &adapter->rx_buff_pool[i]);
@@ -545,7 +545,7 @@ static int ibmveth_open(struct net_device *netdev)
545 545
546 napi_enable(&adapter->napi); 546 napi_enable(&adapter->napi);
547 547
548 for(i = 0; i<IbmVethNumBufferPools; i++) 548 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
549 rxq_entries += adapter->rx_buff_pool[i].size; 549 rxq_entries += adapter->rx_buff_pool[i].size;
550 550
551 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 551 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -621,7 +621,7 @@ static int ibmveth_open(struct net_device *netdev)
621 return -ENONET; 621 return -ENONET;
622 } 622 }
623 623
624 for(i = 0; i<IbmVethNumBufferPools; i++) { 624 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
625 if(!adapter->rx_buff_pool[i].active) 625 if(!adapter->rx_buff_pool[i].active)
626 continue; 626 continue;
627 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { 627 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
@@ -1248,14 +1248,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1248 int i, rc; 1248 int i, rc;
1249 int need_restart = 0; 1249 int need_restart = 0;
1250 1250
1251 if (new_mtu < IBMVETH_MAX_MTU) 1251 if (new_mtu < IBMVETH_MIN_MTU)
1252 return -EINVAL; 1252 return -EINVAL;
1253 1253
1254 for (i = 0; i < IbmVethNumBufferPools; i++) 1254 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1255 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) 1255 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
1256 break; 1256 break;
1257 1257
1258 if (i == IbmVethNumBufferPools) 1258 if (i == IBMVETH_NUM_BUFF_POOLS)
1259 return -EINVAL; 1259 return -EINVAL;
1260 1260
1261 /* Deactivate all the buffer pools so that the next loop can activate 1261 /* Deactivate all the buffer pools so that the next loop can activate
@@ -1268,7 +1268,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1268 } 1268 }
1269 1269
1270 /* Look for an active buffer pool that can hold the new MTU */ 1270 /* Look for an active buffer pool that can hold the new MTU */
1271 for(i = 0; i<IbmVethNumBufferPools; i++) { 1271 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1272 adapter->rx_buff_pool[i].active = 1; 1272 adapter->rx_buff_pool[i].active = 1;
1273 1273
1274 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1274 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
@@ -1322,7 +1322,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1322 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1322 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1323 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1323 ret += IOMMU_PAGE_ALIGN(netdev->mtu);
1324 1324
1325 for (i = 0; i < IbmVethNumBufferPools; i++) { 1325 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1326 /* add the size of the active receive buffers */ 1326 /* add the size of the active receive buffers */
1327 if (adapter->rx_buff_pool[i].active) 1327 if (adapter->rx_buff_pool[i].active)
1328 ret += 1328 ret +=
@@ -1416,7 +1416,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1416 1416
1417 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1417 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1418 1418
1419 for(i = 0; i<IbmVethNumBufferPools; i++) { 1419 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1420 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 1420 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1421 int error; 1421 int error;
1422 1422
@@ -1458,7 +1458,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1458 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1458 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1459 int i; 1459 int i;
1460 1460
1461 for(i = 0; i<IbmVethNumBufferPools; i++) 1461 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1462 kobject_put(&adapter->rx_buff_pool[i].kobj); 1462 kobject_put(&adapter->rx_buff_pool[i].kobj);
1463 1463
1464 unregister_netdev(netdev); 1464 unregister_netdev(netdev);
@@ -1522,7 +1522,7 @@ const char * buf, size_t count)
1522 int i; 1522 int i;
1523 /* Make sure there is a buffer pool with buffers that 1523 /* Make sure there is a buffer pool with buffers that
1524 can hold a packet of the size of the MTU */ 1524 can hold a packet of the size of the MTU */
1525 for (i = 0; i < IbmVethNumBufferPools; i++) { 1525 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1526 if (pool == &adapter->rx_buff_pool[i]) 1526 if (pool == &adapter->rx_buff_pool[i])
1527 continue; 1527 continue;
1528 if (!adapter->rx_buff_pool[i].active) 1528 if (!adapter->rx_buff_pool[i].active)
@@ -1531,7 +1531,7 @@ const char * buf, size_t count)
1531 break; 1531 break;
1532 } 1532 }
1533 1533
1534 if (i == IbmVethNumBufferPools) { 1534 if (i == IBMVETH_NUM_BUFF_POOLS) {
1535 netdev_err(netdev, "no active pool >= MTU\n"); 1535 netdev_err(netdev, "no active pool >= MTU\n");
1536 return -EPERM; 1536 return -EPERM;
1537 } 1537 }
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 84e4ab224124..e6d779471770 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -92,10 +92,10 @@ static inline long h_illan_attributes(unsigned long unit_address,
92#define h_change_logical_lan_mac(ua, mac) \ 92#define h_change_logical_lan_mac(ua, mac) \
93 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 93 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
94 94
95#define IbmVethNumBufferPools 5 95#define IBMVETH_NUM_BUFF_POOLS 5
96#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */ 96#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
97#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ 97#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
98#define IBMVETH_MAX_MTU 68 98#define IBMVETH_MIN_MTU 68
99#define IBMVETH_MAX_POOL_COUNT 4096 99#define IBMVETH_MAX_POOL_COUNT 4096
100#define IBMVETH_BUFF_LIST_SIZE 4096 100#define IBMVETH_BUFF_LIST_SIZE 4096
101#define IBMVETH_FILT_LIST_SIZE 4096 101#define IBMVETH_FILT_LIST_SIZE 4096
@@ -142,7 +142,7 @@ struct ibmveth_adapter {
142 void * filter_list_addr; 142 void * filter_list_addr;
143 dma_addr_t buffer_list_dma; 143 dma_addr_t buffer_list_dma;
144 dma_addr_t filter_list_dma; 144 dma_addr_t filter_list_dma;
145 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; 145 struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
146 struct ibmveth_rx_q rx_queue; 146 struct ibmveth_rx_q rx_queue;
147 int pool_config; 147 int pool_config;
148 int rx_csum; 148 int rx_csum;