author     Santiago Leon <santil@linux.vnet.ibm.com>  2010-09-03 14:29:30 -0400
committer  David S. Miller <davem@davemloft.net>      2010-09-06 21:21:54 -0400
commit     f148f61d89995660e8aa20a2784ecd9c7f25e2a6 (patch)
tree       1fdfea202141509ec4853ed9c822703f23ad2740 /drivers
parent     517e80e6786974651d460a11bb066eab2628ddf1 (diff)
ibmveth: Coding style fixes
Fix most of the kernel coding style issues in ibmveth.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ibmveth.c  317
1 file changed, 172 insertions, 145 deletions
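Before the patch body, a quick orientation: every hunk below is a mechanical application of the rules in Documentation/CodingStyle. A minimal before/after sketch of the recurring transformations (hypothetical C snippet for illustration, not lines taken from the driver):

        /* Patterns checkpatch.pl flags in the old code: */
        if(!ptr) {                      /* no space after the keyword */
                return -1;              /* single statement wrapped in braces */
        }
        // C99-style comment

        /* The style this patch applies throughout ibmveth.c: */
        if (!ptr)                       /* space after if/for/while */
                return -1;              /* lone statement loses its braces */
        /* C89 block comment */

The remaining fixes are of the same kind: long lines wrapped at 80 columns with continuations indented under the opening parenthesis, `} else` gaining braces whenever the other branch has them, pointer casts written `(u64 *)` rather than `(u64*)`, and the boxed header comment rewritten in the conventional multi-line form.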
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e608ee8b5105..8adfff55792d 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,31 +1,29 @@
-/**************************************************************************/
-/*                                                                        */
-/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
-/* Copyright (C) 2003 IBM Corp.                                           */
-/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
-/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
-/*                                                                        */
-/* This program is free software; you can redistribute it and/or modify   */
-/* it under the terms of the GNU General Public License as published by   */
-/* the Free Software Foundation; either version 2 of the License, or      */
-/* (at your option) any later version.                                    */
-/*                                                                        */
-/* This program is distributed in the hope that it will be useful,        */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
-/* GNU General Public License for more details.                           */
-/*                                                                        */
-/* You should have received a copy of the GNU General Public License      */
-/* along with this program; if not, write to the Free Software            */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
-/* USA                                                                    */
-/*                                                                        */
-/* This module contains the implementation of a virtual ethernet device   */
-/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
-/* option of the RS/6000 Platform Architechture to interface with virtual */
-/* ethernet NICs that are presented to the partition by the hypervisor.   */
-/*                                                                        */
-/**************************************************************************/
+/*
+ * IBM eServer i/pSeries Virtual Ethernet Device Driver
+ * Copyright (C) 2003 IBM Corp.
+ * Originally written by Dave Larson (larson1@us.ibm.com)
+ * Maintained by Santiago Leon (santil@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ * This module contains the implementation of a virtual ethernet device
+ * for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN
+ * option of the RS/6000 Platform Architechture to interface with virtual
+ * ethernet NICs that are presented to the partition by the hypervisor.
+ */
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -59,7 +57,7 @@
 
 #ifdef DEBUG
 #define ibmveth_assert(expr) \
-	if(!(expr)) { \
+	if (!(expr)) { \
 		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
 		BUG(); \
 	}
@@ -75,7 +73,8 @@ static struct kobj_type ktype_veth_pool;
 
 
 static const char ibmveth_driver_name[] = "ibmveth";
-static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
+static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet "
+					    "Driver";
 #define ibmveth_driver_version "1.03"
 
 MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
@@ -108,8 +107,10 @@ struct ibmveth_stat {
 struct ibmveth_stat ibmveth_stats[] = {
 	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
 	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
-	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
-	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
+	{ "replenish_add_buff_failure",
+			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+	{ "replenish_add_buff_success",
+			IBMVETH_STAT_OFF(replenish_add_buff_success) },
 	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
 	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
 	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
@@ -126,36 +127,39 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
 
 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+			IBMVETH_RXQ_TOGGLE_SHIFT;
 }
 
 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
+	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
 }
 
 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
 }
 
 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
 }
 
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
+	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
 }
 
 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
 }
 
 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+				     u32 pool_index, u32 pool_size,
+				     u32 buff_size, u32 pool_active)
 {
 	pool->size = pool_size;
 	pool->index = pool_index;
@@ -171,12 +175,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 
 	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
 
-	if(!pool->free_map) {
+	if (!pool->free_map)
 		return -1;
-	}
 
 	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
-	if(!pool->dma_addr) {
+	if (!pool->dma_addr) {
 		kfree(pool->free_map);
 		pool->free_map = NULL;
 		return -1;
@@ -184,7 +187,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 
 	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
 
-	if(!pool->skbuff) {
+	if (!pool->skbuff) {
 		kfree(pool->dma_addr);
 		pool->dma_addr = NULL;
 
@@ -195,9 +198,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 
 	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
 
-	for(i = 0; i < pool->size; ++i) {
+	for (i = 0; i < pool->size; ++i)
 		pool->free_map[i] = i;
-	}
 
 	atomic_set(&pool->available, 0);
 	pool->producer_index = 0;
@@ -217,7 +219,8 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
 /* replenish the buffers for a pool.  note that we don't need to
  * skb_reserve these since they are used for incoming...
  */
-static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+					  struct ibmveth_buff_pool *pool)
 {
 	u32 i;
 	u32 count = pool->size - atomic_read(&pool->available);
@@ -230,12 +233,12 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 	mb();
 
-	for(i = 0; i < count; ++i) {
+	for (i = 0; i < count; ++i) {
 		union ibmveth_buf_desc desc;
 
 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 
-		if(!skb) {
+		if (!skb) {
 			netdev_dbg(adapter->netdev,
 				   "replenish: unable to allocate skb\n");
 			adapter->replenish_no_mem++;
@@ -262,7 +265,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		pool->skbuff[index] = skb;
 
 		correlator = ((u64)pool->index << 32) | index;
-		*(u64*)skb->data = correlator;
+		*(u64 *)skb->data = correlator;
 
 		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 		desc.fields.address = dma_addr;
@@ -273,11 +276,12 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 					   IBMVETH_BUFF_OH);
 			ibmveth_flush_buffer(skb->data, len);
 		}
-		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+						   desc.desc);
 
-		if (lpar_rc != H_SUCCESS)
+		if (lpar_rc != H_SUCCESS) {
 			goto failure;
-		else {
+		} else {
 			buffers_added++;
 			adapter->replenish_add_buff_success++;
 		}
@@ -320,21 +324,23 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 		ibmveth_replenish_buffer_pool(adapter, pool);
 	}
 
-	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+	adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+			4096 - 8);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
-static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+				     struct ibmveth_buff_pool *pool)
 {
 	int i;
 
 	kfree(pool->free_map);
 	pool->free_map = NULL;
 
-	if(pool->skbuff && pool->dma_addr) {
-		for(i = 0; i < pool->size; ++i) {
+	if (pool->skbuff && pool->dma_addr) {
+		for (i = 0; i < pool->size; ++i) {
 			struct sk_buff *skb = pool->skbuff[i];
-			if(skb) {
+			if (skb) {
 				dma_unmap_single(&adapter->vdev->dev,
 						 pool->dma_addr[i],
 						 pool->buff_size,
@@ -345,19 +351,20 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
 		}
 	}
 
-	if(pool->dma_addr) {
+	if (pool->dma_addr) {
 		kfree(pool->dma_addr);
 		pool->dma_addr = NULL;
 	}
 
-	if(pool->skbuff) {
+	if (pool->skbuff) {
 		kfree(pool->skbuff);
 		pool->skbuff = NULL;
 	}
 }
 
 /* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+					    u64 correlator)
 {
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
@@ -416,7 +423,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
 	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
 
-	if(!adapter->rx_buff_pool[pool].active) {
+	if (!adapter->rx_buff_pool[pool].active) {
 		ibmveth_rxq_harvest_buffer(adapter);
 		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
 		return;
@@ -428,13 +435,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 
 	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-	if(lpar_rc != H_SUCCESS) {
+	if (lpar_rc != H_SUCCESS) {
 		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
 			   "during recycle rc=%ld", lpar_rc);
 		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
 	}
 
-	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
@@ -444,7 +451,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 {
 	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
 
-	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
@@ -455,7 +462,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	int i;
 	struct device *dev = &adapter->vdev->dev;
 
-	if(adapter->buffer_list_addr != NULL) {
+	if (adapter->buffer_list_addr != NULL) {
 		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
 			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
@@ -465,7 +472,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->buffer_list_addr = NULL;
 	}
 
-	if(adapter->filter_list_addr != NULL) {
+	if (adapter->filter_list_addr != NULL) {
 		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
 			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
@@ -475,7 +482,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->filter_list_addr = NULL;
 	}
 
-	if(adapter->rx_queue.queue_addr != NULL) {
+	if (adapter->rx_queue.queue_addr != NULL) {
 		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
 			dma_unmap_single(dev,
 					adapter->rx_queue.queue_dma,
@@ -487,7 +494,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->rx_queue.queue_addr = NULL;
 	}
 
-	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
@@ -510,9 +517,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 {
 	int rc, try_again = 1;
 
-	/* After a kexec the adapter will still be open, so our attempt to
-	 * open it will fail. So if we get a failure we free the adapter and
-	 * try again, but only once. */
+	/*
+	 * After a kexec the adapter will still be open, so our attempt to
+	 * open it will fail. So if we get a failure we free the adapter and
+	 * try again, but only once.
+	 */
 retry:
 	rc = h_register_logical_lan(adapter->vdev->unit_address,
 				    adapter->buffer_list_dma, rxq_desc.desc,
@@ -551,7 +560,7 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 
-	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
 		netdev_err(netdev, "unable to allocate filter or buffer list "
 			   "pages\n");
 		ibmveth_cleanup(adapter);
@@ -559,10 +568,12 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENOMEM;
 	}
 
-	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
-	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
+	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+						rxq_entries;
+	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+					       GFP_KERNEL);
 
-	if(!adapter->rx_queue.queue_addr) {
+	if (!adapter->rx_queue.queue_addr) {
 		netdev_err(netdev, "unable to allocate rx queue pages\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
@@ -596,7 +607,8 @@ static int ibmveth_open(struct net_device *netdev)
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
 
-	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
+	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+					adapter->rx_queue.queue_len;
 	rxq_desc.fields.address = adapter->rx_queue.queue_dma;
 
 	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
@@ -607,7 +619,7 @@ static int ibmveth_open(struct net_device *netdev)
 
 	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
-	if(lpar_rc != H_SUCCESS) {
+	if (lpar_rc != H_SUCCESS) {
 		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
 			   lpar_rc);
 		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
@@ -621,8 +633,8 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENONET;
 	}
 
-	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
-		if(!adapter->rx_buff_pool[i].active)
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+		if (!adapter->rx_buff_pool[i].active)
 			continue;
 		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
 			netdev_err(netdev, "unable to alloc pool\n");
@@ -634,7 +646,9 @@ static int ibmveth_open(struct net_device *netdev)
 	}
 
 	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
-	if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
+	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+			 netdev);
+	if (rc != 0) {
 		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
 			   netdev->irq, rc);
 		do {
@@ -692,15 +706,15 @@ static int ibmveth_close(struct net_device *netdev)
 		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
-	if(lpar_rc != H_SUCCESS)
-	{
+	if (lpar_rc != H_SUCCESS) {
 		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
 			   "continuing with close\n", lpar_rc);
 	}
 
 	free_irq(netdev->irq, netdev);
 
-	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+						4096 - 8);
 
 	ibmveth_cleanup(adapter);
 
@@ -709,9 +723,12 @@ static int ibmveth_close(struct net_device *netdev)
 	return 0;
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
-	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
-	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+				SUPPORTED_FIBRE);
+	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+				ADVERTISED_FIBRE);
 	cmd->speed = SPEED_1000;
 	cmd->duplex = DUPLEX_FULL;
 	cmd->port = PORT_FIBRE;
@@ -723,12 +740,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	return 0;
 }
 
-static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
+static void netdev_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
 	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
-	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
+	strncpy(info->version, ibmveth_driver_version,
+		sizeof(info->version) - 1);
 }
 
-static u32 netdev_get_link(struct net_device *dev) {
+static u32 netdev_get_link(struct net_device *dev)
+{
 	return 1;
 }
 
@@ -736,15 +757,16 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 
-	if (data)
+	if (data) {
 		adapter->rx_csum = 1;
-	else {
+	} else {
 		/*
-		 * Since the ibmveth firmware interface does not have the concept of
-		 * separate tx/rx checksum offload enable, if rx checksum is disabled
-		 * we also have to disable tx checksum offload. Once we disable rx
-		 * checksum offload, we are no longer allowed to send tx buffers that
-		 * are not properly checksummed.
+		 * Since the ibmveth firmware interface does not have the
+		 * concept of separate tx/rx checksum offload enable, if rx
+		 * checksum is disabled we also have to disable tx checksum
+		 * offload. Once we disable rx checksum offload, we are no
+		 * longer allowed to send tx buffers that are not properly
+		 * checksummed.
 		 */
 		adapter->rx_csum = 0;
 		dev->features &= ~NETIF_F_IP_CSUM;
@@ -811,8 +833,9 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 
 		ret = h_illan_attributes(adapter->vdev->unit_address,
 					 set_attr, clr_attr, &ret_attr);
-	} else
+	} else {
 		adapter->fw_ipv4_csum_support = data;
+	}
 
 	ret6 = h_illan_attributes(adapter->vdev->unit_address,
 				  clr_attr6, set_attr6, &ret_attr);
@@ -866,7 +889,8 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
 		return 0;
 
 	if (data && !adapter->rx_csum)
-		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
+		rc = ibmveth_set_csum_offload(dev, data,
+					      ibmveth_set_tx_csum_flags);
 	else
 		ibmveth_set_tx_csum_flags(dev, data);
 
@@ -1091,12 +1115,13 @@ map_failed:
 
 static int ibmveth_poll(struct napi_struct *napi, int budget)
 {
-	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
+	struct ibmveth_adapter *adapter =
+			container_of(napi, struct ibmveth_adapter, napi);
 	struct net_device *netdev = adapter->netdev;
 	int frames_processed = 0;
 	unsigned long lpar_rc;
 
 restart_poll:
 	do {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1197,7 +1222,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 					   IbmVethMcastEnableRecv |
 					   IbmVethMcastDisableFiltering,
 					   0);
-		if(lpar_rc != H_SUCCESS) {
+		if (lpar_rc != H_SUCCESS) {
 			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 				   "entering promisc mode\n", lpar_rc);
 		}
@@ -1209,20 +1234,20 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 					   IbmVethMcastDisableFiltering |
 					   IbmVethMcastClearFilterTable,
 					   0);
-		if(lpar_rc != H_SUCCESS) {
+		if (lpar_rc != H_SUCCESS) {
 			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 				   "attempting to clear filter table\n",
 				   lpar_rc);
 		}
 		/* add the addresses to the filter table */
 		netdev_for_each_mc_addr(ha, netdev) {
-			// add the multicast address to the filter table
+			/* add the multicast address to the filter table */
 			unsigned long mcast_addr = 0;
 			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
 			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 						   IbmVethMcastAddFilter,
 						   mcast_addr);
-			if(lpar_rc != H_SUCCESS) {
+			if (lpar_rc != H_SUCCESS) {
 				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
 					   "when adding an entry to the filter "
 					   "table\n", lpar_rc);
@@ -1233,7 +1258,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 					   IbmVethMcastEnableFiltering,
 					   0);
-		if(lpar_rc != H_SUCCESS) {
+		if (lpar_rc != H_SUCCESS) {
 			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 				   "enabling filtering\n", lpar_rc);
 		}
@@ -1268,7 +1293,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	}
 
 	/* Look for an active buffer pool that can hold the new MTU */
-	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		adapter->rx_buff_pool[i].active = 1;
 
 		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
@@ -1351,29 +1376,28 @@ static const struct net_device_ops ibmveth_netdev_ops = {
 #endif
 };
 
-static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+static int __devinit ibmveth_probe(struct vio_dev *dev,
+				   const struct vio_device_id *id)
 {
 	int rc, i;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
-
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
 
-
 	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
 		dev->unit_address);
 
-	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
-						VETH_MAC_ADDR, NULL);
-	if(!mac_addr_p) {
+	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+							NULL);
+	if (!mac_addr_p) {
 		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
 		return 0;
 	}
 
-	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
-						VETH_MCAST_FILTER_SIZE, NULL);
-	if(!mcastFilterSize_p) {
+	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+						VETH_MCAST_FILTER_SIZE, NULL);
+	if (!mcastFilterSize_p) {
 		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
 			"attribute\n");
 		return 0;
@@ -1381,7 +1405,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
 
-	if(!netdev)
+	if (!netdev)
 		return -ENOMEM;
 
 	adapter = netdev_priv(netdev);
@@ -1389,19 +1413,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
-	adapter->mcastFilterSize= *mcastFilterSize_p;
+	adapter->mcastFilterSize = *mcastFilterSize_p;
 	adapter->pool_config = 0;
 
 	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 
-	/* Some older boxes running PHYP non-natively have an OF that
-	   returns a 8-byte local-mac-address field (and the first
-	   2 bytes have to be ignored) while newer boxes' OF return
-	   a 6-byte field. Note that IEEE 1275 specifies that
-	   local-mac-address must be a 6-byte field.
-	   The RPA doc specifies that the first byte must be 10b, so
-	   we'll just look for it to solve this 8 vs. 6 byte field issue */
-
+	/*
+	 * Some older boxes running PHYP non-natively have an OF that returns
+	 * a 8-byte local-mac-address field (and the first 2 bytes have to be
+	 * ignored) while newer boxes' OF return a 6-byte field. Note that
+	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+	 * The RPA doc specifies that the first byte must be 10b, so we'll
+	 * just look for it to solve this 8 vs. 6 byte field issue
+	 */
 	if ((*mac_addr_p & 0x3) != 0x02)
 		mac_addr_p += 2;
 
@@ -1416,7 +1440,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
 		int error;
 
@@ -1441,7 +1465,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	rc = register_netdev(netdev);
 
-	if(rc) {
+	if (rc) {
 		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
 		free_netdev(netdev);
 		return rc;
@@ -1458,7 +1482,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		kobject_put(&adapter->rx_buff_pool[i].kobj);
 
 	unregister_netdev(netdev);
@@ -1473,8 +1497,8 @@ static struct attribute veth_active_attr;
 static struct attribute veth_num_attr;
 static struct attribute veth_size_attr;
 
-static ssize_t veth_pool_show(struct kobject * kobj,
-                              struct attribute * attr, char * buf)
+static ssize_t veth_pool_show(struct kobject *kobj,
+			      struct attribute *attr, char *buf)
 {
 	struct ibmveth_buff_pool *pool = container_of(kobj,
 						      struct ibmveth_buff_pool,
@@ -1489,8 +1513,8 @@ static ssize_t veth_pool_show(struct kobject *kobj,
 	return 0;
 }
 
-static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
-const char * buf, size_t count)
+static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+			       const char *buf, size_t count)
 {
 	struct ibmveth_buff_pool *pool = container_of(kobj,
 						      struct ibmveth_buff_pool,
@@ -1504,7 +1528,7 @@ const char * buf, size_t count)
 	if (attr == &veth_active_attr) {
 		if (value && !pool->active) {
 			if (netif_running(netdev)) {
-				if(ibmveth_alloc_buffer_pool(pool)) {
+				if (ibmveth_alloc_buffer_pool(pool)) {
 					netdev_err(netdev,
 						   "unable to alloc pool\n");
 					return -ENOMEM;
@@ -1515,8 +1539,9 @@ const char * buf, size_t count)
 				adapter->pool_config = 0;
 				if ((rc = ibmveth_open(netdev)))
 					return rc;
-			} else
+			} else {
 				pool->active = 1;
+			}
 		} else if (!value && pool->active) {
 			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
 			int i;
@@ -1547,9 +1572,9 @@ const char * buf, size_t count)
 			pool->active = 0;
 		}
 	} else if (attr == &veth_num_attr) {
-		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
 			return -EINVAL;
-		else {
+		} else {
 			if (netif_running(netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(netdev);
@@ -1557,13 +1582,14 @@ const char * buf, size_t count)
 				pool->size = value;
 				if ((rc = ibmveth_open(netdev)))
 					return rc;
-			} else
+			} else {
 				pool->size = value;
+			}
 		}
 	} else if (attr == &veth_size_attr) {
-		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
 			return -EINVAL;
-		else {
+		} else {
 			if (netif_running(netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(netdev);
@@ -1571,8 +1597,9 @@ const char * buf, size_t count)
 			pool->buff_size = value;
 			if ((rc = ibmveth_open(netdev)))
 				return rc;
-		} else
+		} else {
 			pool->buff_size = value;
+		}
 	}
 }
 
@@ -1582,16 +1609,16 @@ const char * buf, size_t count)
 }
 
 
 #define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};
 
 static ATTR(active, 0644);
 static ATTR(num, 0644);
 static ATTR(size, 0644);
 
-static struct attribute * veth_pool_attrs[] = {
+static struct attribute *veth_pool_attrs[] = {
 	&veth_active_attr,
 	&veth_num_attr,
 	&veth_size_attr,
@@ -1616,7 +1643,7 @@ static int ibmveth_resume(struct device *dev)
 	return 0;
 }
 
-static struct vio_device_id ibmveth_device_table[] __devinitdata= {
+static struct vio_device_id ibmveth_device_table[] __devinitdata = {
 	{ "network", "IBM,l-lan"},
 	{ "", "" }
 };