diff options
author | Santiago Leon <santil@linux.vnet.ibm.com> | 2010-09-03 14:29:25 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-09-06 21:21:53 -0400 |
commit | 517e80e6786974651d460a11bb066eab2628ddf1 (patch) | |
tree | 26af62f72e53afa69ce4ae0ea9c964533fa31c94 /drivers/net/ibmveth.c | |
parent | 21c2decea0f52980a34c79167fe69df3a84d2788 (diff) |
ibmveth: Some formatting fixes
IbmVethNumBufferPools -> IBMVETH_NUM_BUFF_POOLS
Also change IBMVETH_MAX_MTU -> IBMVETH_MIN_MTU; it refers to the minimum
size, not the maximum.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r-- | drivers/net/ibmveth.c | 32 |
1 file changed, 16 insertions, 16 deletions
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 98873678e597..e608ee8b5105 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -312,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |||
312 | 312 | ||
313 | adapter->replenish_task_cycles++; | 313 | adapter->replenish_task_cycles++; |
314 | 314 | ||
315 | for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) { | 315 | for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) { |
316 | struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; | 316 | struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; |
317 | 317 | ||
318 | if (pool->active && | 318 | if (pool->active && |
@@ -364,7 +364,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 | |||
364 | unsigned int free_index; | 364 | unsigned int free_index; |
365 | struct sk_buff *skb; | 365 | struct sk_buff *skb; |
366 | 366 | ||
367 | ibmveth_assert(pool < IbmVethNumBufferPools); | 367 | ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS); |
368 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | 368 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); |
369 | 369 | ||
370 | skb = adapter->rx_buff_pool[pool].skbuff[index]; | 370 | skb = adapter->rx_buff_pool[pool].skbuff[index]; |
@@ -397,7 +397,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada | |||
397 | unsigned int pool = correlator >> 32; | 397 | unsigned int pool = correlator >> 32; |
398 | unsigned int index = correlator & 0xffffffffUL; | 398 | unsigned int index = correlator & 0xffffffffUL; |
399 | 399 | ||
400 | ibmveth_assert(pool < IbmVethNumBufferPools); | 400 | ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS); |
401 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | 401 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); |
402 | 402 | ||
403 | return adapter->rx_buff_pool[pool].skbuff[index]; | 403 | return adapter->rx_buff_pool[pool].skbuff[index]; |
@@ -413,7 +413,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
413 | union ibmveth_buf_desc desc; | 413 | union ibmveth_buf_desc desc; |
414 | unsigned long lpar_rc; | 414 | unsigned long lpar_rc; |
415 | 415 | ||
416 | ibmveth_assert(pool < IbmVethNumBufferPools); | 416 | ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS); |
417 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | 417 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); |
418 | 418 | ||
419 | if(!adapter->rx_buff_pool[pool].active) { | 419 | if(!adapter->rx_buff_pool[pool].active) { |
@@ -487,7 +487,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
487 | adapter->rx_queue.queue_addr = NULL; | 487 | adapter->rx_queue.queue_addr = NULL; |
488 | } | 488 | } |
489 | 489 | ||
490 | for(i = 0; i<IbmVethNumBufferPools; i++) | 490 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
491 | if (adapter->rx_buff_pool[i].active) | 491 | if (adapter->rx_buff_pool[i].active) |
492 | ibmveth_free_buffer_pool(adapter, | 492 | ibmveth_free_buffer_pool(adapter, |
493 | &adapter->rx_buff_pool[i]); | 493 | &adapter->rx_buff_pool[i]); |
@@ -545,7 +545,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
545 | 545 | ||
546 | napi_enable(&adapter->napi); | 546 | napi_enable(&adapter->napi); |
547 | 547 | ||
548 | for(i = 0; i<IbmVethNumBufferPools; i++) | 548 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
549 | rxq_entries += adapter->rx_buff_pool[i].size; | 549 | rxq_entries += adapter->rx_buff_pool[i].size; |
550 | 550 | ||
551 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 551 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
@@ -621,7 +621,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
621 | return -ENONET; | 621 | return -ENONET; |
622 | } | 622 | } |
623 | 623 | ||
624 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 624 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
625 | if(!adapter->rx_buff_pool[i].active) | 625 | if(!adapter->rx_buff_pool[i].active) |
626 | continue; | 626 | continue; |
627 | if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { | 627 | if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { |
@@ -1248,14 +1248,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1248 | int i, rc; | 1248 | int i, rc; |
1249 | int need_restart = 0; | 1249 | int need_restart = 0; |
1250 | 1250 | ||
1251 | if (new_mtu < IBMVETH_MAX_MTU) | 1251 | if (new_mtu < IBMVETH_MIN_MTU) |
1252 | return -EINVAL; | 1252 | return -EINVAL; |
1253 | 1253 | ||
1254 | for (i = 0; i < IbmVethNumBufferPools; i++) | 1254 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
1255 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) | 1255 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) |
1256 | break; | 1256 | break; |
1257 | 1257 | ||
1258 | if (i == IbmVethNumBufferPools) | 1258 | if (i == IBMVETH_NUM_BUFF_POOLS) |
1259 | return -EINVAL; | 1259 | return -EINVAL; |
1260 | 1260 | ||
1261 | /* Deactivate all the buffer pools so that the next loop can activate | 1261 | /* Deactivate all the buffer pools so that the next loop can activate |
@@ -1268,7 +1268,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | /* Look for an active buffer pool that can hold the new MTU */ | 1270 | /* Look for an active buffer pool that can hold the new MTU */ |
1271 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 1271 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1272 | adapter->rx_buff_pool[i].active = 1; | 1272 | adapter->rx_buff_pool[i].active = 1; |
1273 | 1273 | ||
1274 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | 1274 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { |
@@ -1322,7 +1322,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |||
1322 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; | 1322 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; |
1323 | ret += IOMMU_PAGE_ALIGN(netdev->mtu); | 1323 | ret += IOMMU_PAGE_ALIGN(netdev->mtu); |
1324 | 1324 | ||
1325 | for (i = 0; i < IbmVethNumBufferPools; i++) { | 1325 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1326 | /* add the size of the active receive buffers */ | 1326 | /* add the size of the active receive buffers */ |
1327 | if (adapter->rx_buff_pool[i].active) | 1327 | if (adapter->rx_buff_pool[i].active) |
1328 | ret += | 1328 | ret += |
@@ -1416,7 +1416,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1416 | 1416 | ||
1417 | memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); | 1417 | memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); |
1418 | 1418 | ||
1419 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 1419 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1420 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; | 1420 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
1421 | int error; | 1421 | int error; |
1422 | 1422 | ||
@@ -1458,7 +1458,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) | |||
1458 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 1458 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1459 | int i; | 1459 | int i; |
1460 | 1460 | ||
1461 | for(i = 0; i<IbmVethNumBufferPools; i++) | 1461 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
1462 | kobject_put(&adapter->rx_buff_pool[i].kobj); | 1462 | kobject_put(&adapter->rx_buff_pool[i].kobj); |
1463 | 1463 | ||
1464 | unregister_netdev(netdev); | 1464 | unregister_netdev(netdev); |
@@ -1522,7 +1522,7 @@ const char * buf, size_t count) | |||
1522 | int i; | 1522 | int i; |
1523 | /* Make sure there is a buffer pool with buffers that | 1523 | /* Make sure there is a buffer pool with buffers that |
1524 | can hold a packet of the size of the MTU */ | 1524 | can hold a packet of the size of the MTU */ |
1525 | for (i = 0; i < IbmVethNumBufferPools; i++) { | 1525 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1526 | if (pool == &adapter->rx_buff_pool[i]) | 1526 | if (pool == &adapter->rx_buff_pool[i]) |
1527 | continue; | 1527 | continue; |
1528 | if (!adapter->rx_buff_pool[i].active) | 1528 | if (!adapter->rx_buff_pool[i].active) |
@@ -1531,7 +1531,7 @@ const char * buf, size_t count) | |||
1531 | break; | 1531 | break; |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | if (i == IbmVethNumBufferPools) { | 1534 | if (i == IBMVETH_NUM_BUFF_POOLS) { |
1535 | netdev_err(netdev, "no active pool >= MTU\n"); | 1535 | netdev_err(netdev, "no active pool >= MTU\n"); |
1536 | return -EPERM; | 1536 | return -EPERM; |
1537 | } | 1537 | } |