author     Grant Likely <grant.likely@secretlab.ca>  2010-12-30 00:20:30 -0500
committer  Grant Likely <grant.likely@secretlab.ca>  2010-12-30 00:21:47 -0500
commit     d392da5207352f09030e95d9ea335a4225667ec0 (patch)
tree       7d6cd1932afcad0a5619a5c504a6d93ca318187c  /drivers/net/ibmveth.c
parent     e39d5ef678045d61812c1401f04fe8edb14d6359 (diff)
parent     387c31c7e5c9805b0aef8833d1731a5fe7bdea14 (diff)

Merge v2.6.37-rc8 into powerpc/next
Diffstat (limited to 'drivers/net/ibmveth.c')

 drivers/net/ibmveth.c | 1031 +++++++++++++++++++++++++-------------------------
 1 file changed, 528 insertions(+), 503 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 2602852cc55a..c454b45ca7ec 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,122 +1,84 @@
-/**************************************************************************/
-/*                                                                        */
-/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
-/* Copyright (C) 2003 IBM Corp.                                           */
-/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
-/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
-/*                                                                        */
-/* This program is free software; you can redistribute it and/or modify   */
-/* it under the terms of the GNU General Public License as published by   */
-/* the Free Software Foundation; either version 2 of the License, or      */
-/* (at your option) any later version.                                    */
-/*                                                                        */
-/* This program is distributed in the hope that it will be useful,        */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
-/* GNU General Public License for more details.                           */
-/*                                                                        */
-/* You should have received a copy of the GNU General Public License      */
-/* along with this program; if not, write to the Free Software            */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
-/* USA                                                                    */
-/*                                                                        */
-/* This module contains the implementation of a virtual ethernet device   */
-/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
-/* option of the RS/6000 Platform Architechture to interface with virtual */
-/* ethernet NICs that are presented to the partition by the hypervisor.   */
-/*                                                                        */
-/**************************************************************************/
-/*
-  TODO:
-  - add support for sysfs
-  - possibly remove procfs support
-*/
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ *	    Santiago Leon <santil@linux.vnet.ibm.com>
+ *	    Brian King <brking@linux.vnet.ibm.com>
+ *	    Robert Jennings <rcj@linux.vnet.ibm.com>
+ *	    Anton Blanchard <anton@au.ibm.com>
+ */
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/ioport.h>
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/init.h>
-#include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/pm.h>
 #include <linux/ethtool.h>
-#include <linux/proc_fs.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/slab.h>
-#include <net/net_namespace.h>
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
 #include <asm/iommu.h>
-#include <asm/uaccess.h>
 #include <asm/firmware.h>
-#include <linux/seq_file.h>
 
 #include "ibmveth.h"
 
-#undef DEBUG
-
-#define ibmveth_printk(fmt, args...) \
-	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
-
-#define ibmveth_error_printk(fmt, args...) \
-	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-
-#ifdef DEBUG
-#define ibmveth_debug_printk_no_adapter(fmt, args...) \
-	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
-#define ibmveth_debug_printk(fmt, args...) \
-	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-#define ibmveth_assert(expr) \
-	if (!(expr)) { \
-		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
-		BUG(); \
-	}
-#else
-#define ibmveth_debug_printk_no_adapter(fmt, args...)
-#define ibmveth_debug_printk(fmt, args...)
-#define ibmveth_assert(expr)
-#endif
-
-static int ibmveth_open(struct net_device *dev);
-static int ibmveth_close(struct net_device *dev);
-static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-static int ibmveth_poll(struct napi_struct *napi, int budget);
-static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void ibmveth_set_multicast_list(struct net_device *dev);
-static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
-static void ibmveth_proc_register_driver(void);
-static void ibmveth_proc_unregister_driver(void);
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
-static struct kobj_type ktype_veth_pool;
 
+static struct kobj_type ktype_veth_pool;
 
-#ifdef CONFIG_PROC_FS
-#define IBMVETH_PROC_DIR "ibmveth"
-static struct proc_dir_entry *ibmveth_proc_dir;
-#endif
 
 static const char ibmveth_driver_name[] = "ibmveth";
-static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.03"
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
 
-MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
-MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ibmveth_driver_version);
 
+static unsigned int tx_copybreak __read_mostly = 128;
+module_param(tx_copybreak, uint, 0644);
+MODULE_PARM_DESC(tx_copybreak,
+	"Maximum size of packet that is copied to a new buffer on transmit");
+
+static unsigned int rx_copybreak __read_mostly = 128;
+module_param(rx_copybreak, uint, 0644);
+MODULE_PARM_DESC(rx_copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
 struct ibmveth_stat {
 	char name[ETH_GSTRING_LEN];
 	int offset;
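The three module parameters introduced above set up a copy-versus-map tradeoff: on this platform every DMA map/unmap is a TCE (translation control entry) operation through the hypervisor, so for small packets a memcpy into a long-lived, pre-mapped buffer is cheaper than mapping the skb itself. A minimal sketch of the decision, assuming hypothetical send_copied()/send_mapped() helpers rather than the driver's own functions:

	#include <linux/skbuff.h>

	static int send_copied(struct sk_buff *skb);	/* hypothetical */
	static int send_mapped(struct sk_buff *skb);	/* hypothetical */

	static int xmit_sketch(struct sk_buff *skb, unsigned int copybreak)
	{
		/* small linear packet: memcpy into a pre-mapped bounce buffer */
		if (!skb_is_nonlinear(skb) && skb->len < copybreak)
			return send_copied(skb);
		/* otherwise pay for a dma_map_single() (TCE insert) and unmap */
		return send_mapped(skb);
	}

The real transmit-side logic appears in the rewritten ibmveth_start_xmit() later in this patch; the receive-side equivalent is in ibmveth_poll().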
@@ -128,12 +90,16 @@ struct ibmveth_stat {
 struct ibmveth_stat ibmveth_stats[] = {
 	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
 	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
-	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
-	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
+	{ "replenish_add_buff_failure",
+			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+	{ "replenish_add_buff_success",
+			IBMVETH_STAT_OFF(replenish_add_buff_success) },
 	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
 	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
 	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
 	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
+	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -144,41 +110,44 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
 
 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+			IBMVETH_RXQ_TOGGLE_SHIFT;
 }
 
 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
+	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
 }
 
 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
 }
 
 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
 }
 
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
+	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
 }
 
 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
 {
-	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
+	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
 }
 
 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+				     u32 pool_index, u32 pool_size,
+				     u32 buff_size, u32 pool_active)
 {
 	pool->size = pool_size;
 	pool->index = pool_index;
 	pool->buff_size = buff_size;
-	pool->threshold = pool_size / 2;
+	pool->threshold = pool_size * 7 / 8;
 	pool->active = pool_active;
 }
 
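Two related tuning changes meet in this hunk: the replenish threshold rises from half the pool to 7/8 of it (a 512-buffer pool, for example, gives 512 * 7 / 8 = 448 in integer arithmetic), and ibmveth_replenish_task() further down is changed to refill only pools whose available count has actually dropped below that threshold, rather than unconditionally replenishing every active pool on every pass. Together they keep the receive queue fuller while skipping replenish work when little has been consumed.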
@@ -189,12 +158,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 
 	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
 
-	if(!pool->free_map) {
+	if (!pool->free_map)
 		return -1;
-	}
 
 	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
-	if(!pool->dma_addr) {
+	if (!pool->dma_addr) {
 		kfree(pool->free_map);
 		pool->free_map = NULL;
 		return -1;
@@ -202,7 +170,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 
 	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
 
-	if(!pool->skbuff) {
+	if (!pool->skbuff) {
 		kfree(pool->dma_addr);
 		pool->dma_addr = NULL;
 
@@ -213,9 +181,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 
 	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
 
-	for(i = 0; i < pool->size; ++i) {
+	for (i = 0; i < pool->size; ++i)
 		pool->free_map[i] = i;
-	}
 
 	atomic_set(&pool->available, 0);
 	pool->producer_index = 0;
@@ -224,10 +191,19 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 	return 0;
 }
 
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+	unsigned long offset;
+
+	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
 /* replenish the buffers for a pool.  note that we don't need to
  * skb_reserve these since they are used for incoming...
  */
-static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+					  struct ibmveth_buff_pool *pool)
 {
 	u32 i;
 	u32 count = pool->size - atomic_read(&pool->available);
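The new ibmveth_flush_buffer() walks a buffer one cache line at a time (SMP_CACHE_BYTES apart) and issues dcbfl, a variant of the PowerPC dcbf data-cache-block-flush instruction, for each line; the rx_flush parameter uses it to push receive buffers out of the local cache before they are handed to firmware. The same stride pattern in portable form, with flush_line() as a hypothetical stand-in for the machine-specific primitive:

	/* Sketch only: flush_line() stands in for the "dcbfl" inline asm above. */
	static void flush_line(void *line_addr);	/* hypothetical primitive */

	static void flush_buffer_sketch(void *addr, unsigned long length,
					unsigned long line_size)
	{
		unsigned long offset;

		/* touch each cache line covering [addr, addr + length) once */
		for (offset = 0; offset < length; offset += line_size)
			flush_line((char *)addr + offset);
	}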
@@ -240,23 +216,26 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 	mb();
 
-	for(i = 0; i < count; ++i) {
+	for (i = 0; i < count; ++i) {
 		union ibmveth_buf_desc desc;
 
-		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 
-		if(!skb) {
-			ibmveth_debug_printk("replenish: unable to allocate skb\n");
+		if (!skb) {
+			netdev_dbg(adapter->netdev,
+				   "replenish: unable to allocate skb\n");
 			adapter->replenish_no_mem++;
 			break;
 		}
 
 		free_index = pool->consumer_index;
-		pool->consumer_index = (pool->consumer_index + 1) % pool->size;
+		pool->consumer_index++;
+		if (pool->consumer_index >= pool->size)
+			pool->consumer_index = 0;
 		index = pool->free_map[free_index];
 
-		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
-		ibmveth_assert(pool->skbuff[index] == NULL);
+		BUG_ON(index == IBM_VETH_INVALID_MAP);
+		BUG_ON(pool->skbuff[index] != NULL);
 
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
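Note the consumer-index update: the modulo is replaced by an increment with a compare-and-reset. Because the index only ever advances by one, the comparison is equivalent to the `% pool->size` it replaces and avoids an integer divide per buffer; ibmveth_remove_buffer_from_pool() gets the same treatment for the producer index further down. In isolation:

	/* Sketch: one-step ring-index advance without the divide.
	 * Valid only because the index moves by exactly one each time. */
	static inline unsigned int ring_advance(unsigned int index,
						unsigned int size)
	{
		if (++index >= size)
			index = 0;
		return index;
	}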
@@ -269,16 +248,23 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		pool->skbuff[index] = skb;
 
 		correlator = ((u64)pool->index << 32) | index;
-		*(u64*)skb->data = correlator;
+		*(u64 *)skb->data = correlator;
 
 		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 		desc.fields.address = dma_addr;
 
-		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+		if (rx_flush) {
+			unsigned int len = min(pool->buff_size,
+						adapter->netdev->mtu +
+						IBMVETH_BUFF_OH);
+			ibmveth_flush_buffer(skb->data, len);
+		}
+		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+						   desc.desc);
 
-		if (lpar_rc != H_SUCCESS)
+		if (lpar_rc != H_SUCCESS) {
 			goto failure;
-		else {
+		} else {
 			buffers_added++;
 			adapter->replenish_add_buff_success++;
 		}
@@ -313,26 +299,31 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 
 	adapter->replenish_task_cycles++;
 
-	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
-		if(adapter->rx_buff_pool[i].active)
-			ibmveth_replenish_buffer_pool(adapter,
-						     &adapter->rx_buff_pool[i]);
+	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
+		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
+
+		if (pool->active &&
+		    (atomic_read(&pool->available) < pool->threshold))
+			ibmveth_replenish_buffer_pool(adapter, pool);
+	}
 
-	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+	adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+						4096 - 8);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
-static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+				     struct ibmveth_buff_pool *pool)
 {
 	int i;
 
 	kfree(pool->free_map);
 	pool->free_map = NULL;
 
-	if(pool->skbuff && pool->dma_addr) {
-		for(i = 0; i < pool->size; ++i) {
+	if (pool->skbuff && pool->dma_addr) {
+		for (i = 0; i < pool->size; ++i) {
 			struct sk_buff *skb = pool->skbuff[i];
-			if(skb) {
+			if (skb) {
 				dma_unmap_single(&adapter->vdev->dev,
 						 pool->dma_addr[i],
 						 pool->buff_size,
@@ -343,31 +334,32 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
 			}
 		}
 
-	if(pool->dma_addr) {
+	if (pool->dma_addr) {
 		kfree(pool->dma_addr);
 		pool->dma_addr = NULL;
 	}
 
-	if(pool->skbuff) {
+	if (pool->skbuff) {
 		kfree(pool->skbuff);
 		pool->skbuff = NULL;
 	}
 }
 
 /* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+					    u64 correlator)
 {
 	unsigned int pool  = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 	unsigned int free_index;
 	struct sk_buff *skb;
 
-	ibmveth_assert(pool < IbmVethNumBufferPools);
-	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 
 	skb = adapter->rx_buff_pool[pool].skbuff[index];
 
-	ibmveth_assert(skb != NULL);
+	BUG_ON(skb == NULL);
 
 	adapter->rx_buff_pool[pool].skbuff[index] = NULL;
 
@@ -377,9 +369,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 			 DMA_FROM_DEVICE);
 
 	free_index = adapter->rx_buff_pool[pool].producer_index;
-	adapter->rx_buff_pool[pool].producer_index
-		= (adapter->rx_buff_pool[pool].producer_index + 1)
-		% adapter->rx_buff_pool[pool].size;
+	adapter->rx_buff_pool[pool].producer_index++;
+	if (adapter->rx_buff_pool[pool].producer_index >=
+	    adapter->rx_buff_pool[pool].size)
+		adapter->rx_buff_pool[pool].producer_index = 0;
 	adapter->rx_buff_pool[pool].free_map[free_index] = index;
 
 	mb();
@@ -394,8 +387,8 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 
-	ibmveth_assert(pool < IbmVethNumBufferPools);
-	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 
 	return adapter->rx_buff_pool[pool].skbuff[index];
 }
@@ -410,10 +403,10 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	union ibmveth_buf_desc desc;
 	unsigned long lpar_rc;
 
-	ibmveth_assert(pool < IbmVethNumBufferPools);
-	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 
-	if(!adapter->rx_buff_pool[pool].active) {
+	if (!adapter->rx_buff_pool[pool].active) {
 		ibmveth_rxq_harvest_buffer(adapter);
 		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
 		return;
@@ -425,12 +418,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 
 	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-	if(lpar_rc != H_SUCCESS) {
-		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
+	if (lpar_rc != H_SUCCESS) {
+		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
+			   "during recycle rc=%ld", lpar_rc);
 		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
 	}
 
-	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
@@ -440,7 +434,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 {
 	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
 
-	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
@@ -451,7 +445,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	int i;
 	struct device *dev = &adapter->vdev->dev;
 
-	if(adapter->buffer_list_addr != NULL) {
+	if (adapter->buffer_list_addr != NULL) {
 		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
 			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
@@ -461,7 +455,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->buffer_list_addr = NULL;
 	}
 
-	if(adapter->filter_list_addr != NULL) {
+	if (adapter->filter_list_addr != NULL) {
 		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
 			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
@@ -471,7 +465,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->filter_list_addr = NULL;
 	}
 
-	if(adapter->rx_queue.queue_addr != NULL) {
+	if (adapter->rx_queue.queue_addr != NULL) {
 		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
 			dma_unmap_single(dev,
 					adapter->rx_queue.queue_dma,
@@ -483,7 +477,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->rx_queue.queue_addr = NULL;
 	}
 
-	for(i = 0; i<IbmVethNumBufferPools; i++)
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
@@ -506,9 +500,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 {
 	int rc, try_again = 1;
 
-	/* After a kexec the adapter will still be open, so our attempt to
-	* open it will fail. So if we get a failure we free the adapter and
-	* try again, but only once. */
+	/*
+	 * After a kexec the adapter will still be open, so our attempt to
+	 * open it will fail. So if we get a failure we free the adapter and
+	 * try again, but only once.
+	 */
 retry:
 	rc = h_register_logical_lan(adapter->vdev->unit_address,
 				    adapter->buffer_list_dma, rxq_desc.desc,
@@ -537,31 +533,32 @@ static int ibmveth_open(struct net_device *netdev)
 	int i;
 	struct device *dev;
 
-	ibmveth_debug_printk("open starting\n");
+	netdev_dbg(netdev, "open starting\n");
 
 	napi_enable(&adapter->napi);
 
-	for(i = 0; i<IbmVethNumBufferPools; i++)
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;
 
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 
-	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
-		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+		netdev_err(netdev, "unable to allocate filter or buffer list "
+			   "pages\n");
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
-	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
-	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
+	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+						rxq_entries;
+	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+						GFP_KERNEL);
 
-	if(!adapter->rx_queue.queue_addr) {
-		ibmveth_error_printk("unable to allocate rx queue pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+	if (!adapter->rx_queue.queue_addr) {
+		netdev_err(netdev, "unable to allocate rx queue pages\n");
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -577,10 +574,10 @@ static int ibmveth_open(struct net_device *netdev)
 	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
 	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
 	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
-		ibmveth_error_printk("unable to map filter or buffer list pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		netdev_err(netdev, "unable to map filter or buffer list "
+			   "pages\n");
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -590,79 +587,86 @@ static int ibmveth_open(struct net_device *netdev)
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
 
-	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
+	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+					adapter->rx_queue.queue_len;
 	rxq_desc.fields.address = adapter->rx_queue.queue_dma;
 
-	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
-	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
-	ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
+	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
+	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
 
 	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
 	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
-	if(lpar_rc != H_SUCCESS) {
-		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
-		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
+	if (lpar_rc != H_SUCCESS) {
+		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+			   lpar_rc);
+		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENONET;
+		rc = -ENONET;
+		goto err_out;
 	}
 
-	for(i = 0; i<IbmVethNumBufferPools; i++) {
-		if(!adapter->rx_buff_pool[i].active)
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+		if (!adapter->rx_buff_pool[i].active)
 			continue;
 		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
-			ibmveth_error_printk("unable to alloc pool\n");
+			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
-			ibmveth_cleanup(adapter);
-			napi_disable(&adapter->napi);
-			return -ENOMEM ;
+			rc = -ENOMEM;
+			goto err_out;
 		}
 	}
 
-	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
-	if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
-		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
+	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
+	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+			 netdev);
+	if (rc != 0) {
+		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+			   netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
 
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return rc;
+		goto err_out;
 	}
 
 	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
 	if (!adapter->bounce_buffer) {
-		ibmveth_error_printk("unable to allocate bounce buffer\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		netdev_err(netdev, "unable to allocate bounce buffer\n");
+		rc = -ENOMEM;
+		goto err_out_free_irq;
 	}
 	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-		ibmveth_error_printk("unable to map bounce buffer\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		netdev_err(netdev, "unable to map bounce buffer\n");
+		rc = -ENOMEM;
+		goto err_out_free_irq;
 	}
 
-	ibmveth_debug_printk("initial replenish cycle\n");
+	netdev_dbg(netdev, "initial replenish cycle\n");
 	ibmveth_interrupt(netdev->irq, netdev);
 
 	netif_start_queue(netdev);
 
-	ibmveth_debug_printk("open complete\n");
+	netdev_dbg(netdev, "open complete\n");
 
 	return 0;
+
+err_out_free_irq:
+	free_irq(netdev->irq, netdev);
+err_out:
+	ibmveth_cleanup(adapter);
+	napi_disable(&adapter->napi);
+	return rc;
 }
 
 static int ibmveth_close(struct net_device *netdev)
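ibmveth_open() previously repeated ibmveth_cleanup() + napi_disable() + return at every failure point; the hunk above funnels everything through the err_out/err_out_free_irq labels instead, the standard kernel unwind idiom. Reduced to its shape, with step_a()/step_b()/undo_a() as placeholders rather than driver functions:

	static int step_a(void), step_b(void);	/* hypothetical setup steps */
	static void undo_a(void);		/* hypothetical teardown */

	int open_sketch(void)
	{
		int rc;

		rc = step_a();
		if (rc)
			goto err_out;
		rc = step_b();
		if (rc)
			goto err_out_undo_a;	/* later failures undo more */
		return 0;

	err_out_undo_a:
		undo_a();		/* unwind in reverse order of setup */
	err_out:
		return rc;
	}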
@@ -670,7 +674,7 @@ static int ibmveth_close(struct net_device *netdev)
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	long lpar_rc;
 
-	ibmveth_debug_printk("close starting\n");
+	netdev_dbg(netdev, "close starting\n");
 
 	napi_disable(&adapter->napi);
 
@@ -683,26 +687,29 @@ static int ibmveth_close(struct net_device *netdev)
 		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
-	if(lpar_rc != H_SUCCESS)
-	{
-		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
-				     lpar_rc);
+	if (lpar_rc != H_SUCCESS) {
+		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+			   "continuing with close\n", lpar_rc);
 	}
 
 	free_irq(netdev->irq, netdev);
 
-	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+						4096 - 8);
 
 	ibmveth_cleanup(adapter);
 
-	ibmveth_debug_printk("close complete\n");
+	netdev_dbg(netdev, "close complete\n");
 
 	return 0;
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
-	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
-	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+				SUPPORTED_FIBRE);
+	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+				ADVERTISED_FIBRE);
 	cmd->speed = SPEED_1000;
 	cmd->duplex = DUPLEX_FULL;
 	cmd->port = PORT_FIBRE;
@@ -714,12 +721,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	return 0;
 }
 
-static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
+static void netdev_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
 	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
-	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
+	strncpy(info->version, ibmveth_driver_version,
+		sizeof(info->version) - 1);
 }
 
-static u32 netdev_get_link(struct net_device *dev) {
+static u32 netdev_get_link(struct net_device *dev)
+{
 	return 1;
 }
 
@@ -727,18 +738,20 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 
-	if (data)
+	if (data) {
 		adapter->rx_csum = 1;
-	else {
+	} else {
 		/*
-		 * Since the ibmveth firmware interface does not have the concept of
-		 * separate tx/rx checksum offload enable, if rx checksum is disabled
-		 * we also have to disable tx checksum offload. Once we disable rx
-		 * checksum offload, we are no longer allowed to send tx buffers that
-		 * are not properly checksummed.
+		 * Since the ibmveth firmware interface does not have the
+		 * concept of separate tx/rx checksum offload enable, if rx
+		 * checksum is disabled we also have to disable tx checksum
+		 * offload. Once we disable rx checksum offload, we are no
+		 * longer allowed to send tx buffers that are not properly
+		 * checksummed.
		 */
 		adapter->rx_csum = 0;
 		dev->features &= ~NETIF_F_IP_CSUM;
+		dev->features &= ~NETIF_F_IPV6_CSUM;
 	}
 }
 
@@ -747,10 +760,15 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 
 	if (data) {
-		dev->features |= NETIF_F_IP_CSUM;
+		if (adapter->fw_ipv4_csum_support)
+			dev->features |= NETIF_F_IP_CSUM;
+		if (adapter->fw_ipv6_csum_support)
+			dev->features |= NETIF_F_IPV6_CSUM;
 		adapter->rx_csum = 1;
-	} else
+	} else {
 		dev->features &= ~NETIF_F_IP_CSUM;
+		dev->features &= ~NETIF_F_IPV6_CSUM;
+	}
 }
 
 static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
@@ -758,7 +776,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	unsigned long set_attr, clr_attr, ret_attr;
-	long ret;
+	unsigned long set_attr6, clr_attr6;
+	long ret, ret6;
 	int rc1 = 0, rc2 = 0;
 	int restart = 0;
 
@@ -772,10 +791,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 	set_attr = 0;
 	clr_attr = 0;
 
-	if (data)
+	if (data) {
 		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
-	else
+		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+	} else {
 		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+	}
 
 	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
 
@@ -786,18 +808,39 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 					 set_attr, &ret_attr);
 
 		if (ret != H_SUCCESS) {
-			rc1 = -EIO;
-			ibmveth_error_printk("unable to change checksum offload settings."
-					     " %d rc=%ld\n", data, ret);
+			netdev_err(dev, "unable to change IPv4 checksum "
+					"offload settings. %d rc=%ld\n",
+					data, ret);
 
 			ret = h_illan_attributes(adapter->vdev->unit_address,
 						 set_attr, clr_attr, &ret_attr);
+		} else {
+			adapter->fw_ipv4_csum_support = data;
+		}
+
+		ret6 = h_illan_attributes(adapter->vdev->unit_address,
+					 clr_attr6, set_attr6, &ret_attr);
+
+		if (ret6 != H_SUCCESS) {
+			netdev_err(dev, "unable to change IPv6 checksum "
+					"offload settings. %d rc=%ld\n",
+					data, ret);
+
+			ret = h_illan_attributes(adapter->vdev->unit_address,
+						 set_attr6, clr_attr6,
+						 &ret_attr);
 		} else
+			adapter->fw_ipv6_csum_support = data;
+
+		if (ret == H_SUCCESS || ret6 == H_SUCCESS)
 			done(dev, data);
+		else
+			rc1 = -EIO;
 	} else {
 		rc1 = -EIO;
-		ibmveth_error_printk("unable to change checksum offload settings."
-				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
+		netdev_err(dev, "unable to change checksum offload settings."
+				" %d rc=%ld ret_attr=%lx\n", data, ret,
+				ret_attr);
 	}
 
 	if (restart)
@@ -821,13 +864,14 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	int rc = 0;
 
-	if (data && (dev->features & NETIF_F_IP_CSUM))
+	if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
 		return 0;
-	if (!data && !(dev->features & NETIF_F_IP_CSUM))
+	if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
 		return 0;
 
 	if (data && !adapter->rx_csum)
-		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
+		rc = ibmveth_set_csum_offload(dev, data,
+					      ibmveth_set_tx_csum_flags);
 	else
 		ibmveth_set_tx_csum_flags(dev, data);
 
@@ -881,6 +925,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_strings		= ibmveth_get_strings,
 	.get_sset_count		= ibmveth_get_sset_count,
 	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
+	.set_sg			= ethtool_op_set_sg,
 };
 
 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
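Hooking .set_sg up to the generic ethtool_op_set_sg is what lets userspace turn NETIF_F_SG on, and that in turn is what makes the fragmented transmit path in the next hunk reachable: h_send_logical_lan() takes six buffer descriptors, so a scatter-gather skb with a header plus up to five frags can be sent without copying, and anything with more fragments is linearized first.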
@@ -890,129 +935,216 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 
-static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
-				      struct net_device *netdev)
+static int ibmveth_send(struct ibmveth_adapter *adapter,
+			union ibmveth_buf_desc *descs)
 {
-	struct ibmveth_adapter *adapter = netdev_priv(netdev);
-	union ibmveth_buf_desc desc;
-	unsigned long lpar_rc;
 	unsigned long correlator;
-	unsigned long flags;
 	unsigned int retry_count;
-	unsigned int tx_dropped = 0;
-	unsigned int tx_bytes = 0;
-	unsigned int tx_packets = 0;
-	unsigned int tx_send_failed = 0;
-	unsigned int tx_map_failed = 0;
-	int used_bounce = 0;
-	unsigned long data_dma_addr;
+	unsigned long ret;
+
+	/*
+	 * The retry count sets a maximum for the number of broadcast and
+	 * multicast destinations within the system.
+	 */
+	retry_count = 1024;
+	correlator = 0;
+	do {
+		ret = h_send_logical_lan(adapter->vdev->unit_address,
+					 descs[0].desc, descs[1].desc,
+					 descs[2].desc, descs[3].desc,
+					 descs[4].desc, descs[5].desc,
+					 correlator, &correlator);
+	} while ((ret == H_BUSY) && (retry_count--));
 
-	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
+	if (ret != H_SUCCESS && ret != H_DROPPED) {
+		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+			   "with rc=%ld\n", ret);
+		return 1;
+	}
+
+	return 0;
+}
 
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+				      struct net_device *netdev)
+{
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
+	unsigned int desc_flags;
+	union ibmveth_buf_desc descs[6];
+	int last, i;
+	int force_bounce = 0;
+
+	/*
+	 * veth handles a maximum of 6 segments including the header, so
+	 * we have to linearize the skb if there are more than this.
+	 */
+	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
+		netdev->stats.tx_dropped++;
+		goto out;
+	}
+
+	/* veth can't checksum offload UDP */
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
-	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
-		ibmveth_error_printk("tx: failed to checksum packet\n");
-		tx_dropped++;
+	    ((skb->protocol == htons(ETH_P_IP) &&
+	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
+	     (skb->protocol == htons(ETH_P_IPV6) &&
+	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
+	    skb_checksum_help(skb)) {
+
+		netdev_err(netdev, "tx: failed to checksum packet\n");
+		netdev->stats.tx_dropped++;
 		goto out;
 	}
 
+	desc_flags = IBMVETH_BUF_VALID;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
+		unsigned char *buf = skb_transport_header(skb) +
+						skb->csum_offset;
 
-		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
+		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
 
 		/* Need to zero out the checksum */
 		buf[0] = 0;
 		buf[1] = 0;
 	}
 
-	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
-				       skb->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
-		if (!firmware_has_feature(FW_FEATURE_CMO))
-			ibmveth_error_printk("tx: unable to map xmit buffer\n");
+retry_bounce:
+	memset(descs, 0, sizeof(descs));
+
+	/*
+	 * If a linear packet is below the rx threshold then
+	 * copy it into the static bounce buffer. This avoids the
+	 * cost of a TCE insert and remove.
+	 */
+	if (force_bounce || (!skb_is_nonlinear(skb) &&
+				(skb->len < tx_copybreak))) {
 		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
 					  skb->len);
-		desc.fields.address = adapter->bounce_buffer_dma;
-		tx_map_failed++;
-		used_bounce = 1;
-		wmb();
-	} else
-		desc.fields.address = data_dma_addr;
-
-	/* send the frame. Arbitrarily set retrycount to 1024 */
-	correlator = 0;
-	retry_count = 1024;
-	do {
-		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
-					     desc.desc, 0, 0, 0, 0, 0,
-					     correlator, &correlator);
-	} while ((lpar_rc == H_BUSY) && (retry_count--));
-
-	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
-		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
-		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
-				     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
-				     skb->len, desc.fields.address);
-		tx_send_failed++;
-		tx_dropped++;
-	} else {
-		tx_packets++;
-		tx_bytes += skb->len;
-		netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+
+		descs[0].fields.flags_len = desc_flags | skb->len;
+		descs[0].fields.address = adapter->bounce_buffer_dma;
+
+		if (ibmveth_send(adapter, descs)) {
+			adapter->tx_send_failed++;
+			netdev->stats.tx_dropped++;
+		} else {
+			netdev->stats.tx_packets++;
+			netdev->stats.tx_bytes += skb->len;
+		}
+
+		goto out;
+	}
+
+	/* Map the header */
+	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
+						 skb_headlen(skb),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+		goto map_failed;
+
+	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+
+	/* Map the frags */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		unsigned long dma_addr;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
+					frag->page_offset, frag->size,
+					DMA_TO_DEVICE);
+
+		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+			goto map_failed_frags;
+
+		descs[i+1].fields.flags_len = desc_flags | frag->size;
+		descs[i+1].fields.address = dma_addr;
 	}
 
-	if (!used_bounce)
-		dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
-				 skb->len, DMA_TO_DEVICE);
+	if (ibmveth_send(adapter, descs)) {
+		adapter->tx_send_failed++;
+		netdev->stats.tx_dropped++;
+	} else {
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += skb->len;
+	}
 
-out:	spin_lock_irqsave(&adapter->stats_lock, flags);
-	netdev->stats.tx_dropped += tx_dropped;
-	netdev->stats.tx_bytes += tx_bytes;
-	netdev->stats.tx_packets += tx_packets;
-	adapter->tx_send_failed += tx_send_failed;
-	adapter->tx_map_failed += tx_map_failed;
-	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			       DMA_TO_DEVICE);
 
+out:
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
+
+map_failed_frags:
+	last = i+1;
+	for (i = 0; i < last; i++)
+		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			       DMA_TO_DEVICE);
+
+map_failed:
+	if (!firmware_has_feature(FW_FEATURE_CMO))
+		netdev_err(netdev, "tx: unable to map xmit buffer\n");
+	adapter->tx_map_failed++;
+	skb_linearize(skb);
+	force_bounce = 1;
+	goto retry_bounce;
 }
 
 static int ibmveth_poll(struct napi_struct *napi, int budget)
 {
-	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
+	struct ibmveth_adapter *adapter =
+			container_of(napi, struct ibmveth_adapter, napi);
 	struct net_device *netdev = adapter->netdev;
 	int frames_processed = 0;
 	unsigned long lpar_rc;
 
- restart_poll:
+restart_poll:
 	do {
-		struct sk_buff *skb;
-
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
 
-		rmb();
+		smp_rmb();
 		if (!ibmveth_rxq_buffer_valid(adapter)) {
 			wmb(); /* suggested by larson1 */
 			adapter->rx_invalid_buffer++;
-			ibmveth_debug_printk("recycling invalid buffer\n");
+			netdev_dbg(netdev, "recycling invalid buffer\n");
 			ibmveth_rxq_recycle_buffer(adapter);
 		} else {
+			struct sk_buff *skb, *new_skb;
 			int length = ibmveth_rxq_frame_length(adapter);
 			int offset = ibmveth_rxq_frame_offset(adapter);
 			int csum_good = ibmveth_rxq_csum_good(adapter);
 
 			skb = ibmveth_rxq_get_buffer(adapter);
-			if (csum_good)
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-			ibmveth_rxq_harvest_buffer(adapter);
+			new_skb = NULL;
+			if (length < rx_copybreak)
+				new_skb = netdev_alloc_skb(netdev, length);
+
+			if (new_skb) {
+				skb_copy_to_linear_data(new_skb,
+							skb->data + offset,
+							length);
+				if (rx_flush)
+					ibmveth_flush_buffer(skb->data,
+						length + offset);
+				skb = new_skb;
+				ibmveth_rxq_recycle_buffer(adapter);
+			} else {
+				ibmveth_rxq_harvest_buffer(adapter);
+				skb_reserve(skb, offset);
+			}
 
-			skb_reserve(skb, offset);
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, netdev);
 
+			if (csum_good)
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 			netif_receive_skb(skb);	/* send it up */
 
 			netdev->stats.rx_packets++;
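The receive side of the copybreak scheme appears at the end of the hunk above: frames shorter than rx_copybreak are copied into a freshly allocated skb and the original buffer is recycled straight back onto the firmware receive queue with its DMA mapping intact, while larger frames are harvested, leaving their buffer to be reallocated and re-mapped later by the replenish path. With the default threshold of 128, for example, a 64-byte TCP ACK costs one small memcpy instead of a buffer-pool refill and a TCE remap.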
@@ -1030,7 +1162,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 				       VIO_IRQ_ENABLE);
 
-		ibmveth_assert(lpar_rc == H_SUCCESS);
+		BUG_ON(lpar_rc != H_SUCCESS);
 
 		napi_complete(napi);
 
@@ -1054,7 +1186,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1054 if (napi_schedule_prep(&adapter->napi)) { 1186 if (napi_schedule_prep(&adapter->napi)) {
1055 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1187 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1056 VIO_IRQ_DISABLE); 1188 VIO_IRQ_DISABLE);
1057 ibmveth_assert(lpar_rc == H_SUCCESS); 1189 BUG_ON(lpar_rc != H_SUCCESS);
1058 __napi_schedule(&adapter->napi); 1190 __napi_schedule(&adapter->napi);
1059 } 1191 }
1060 return IRQ_HANDLED; 1192 return IRQ_HANDLED;
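
ibmveth_interrupt() and ibmveth_poll() together follow the standard NAPI handshake: the hard interrupt disables further device interrupts and schedules the softirq poll, and the poll loop re-enables interrupts only once the queue is drained (retrying if a frame slipped in meanwhile, as the restart_poll label above suggests). A generic sketch of the pattern; in this driver the enable/disable step is the h_vio_signal() hypervisor call, where a PCI NIC would poke a device register:

static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
        struct my_adapter *adapter = dev_instance;

        if (napi_schedule_prep(&adapter->napi)) {
                disable_device_interrupts(adapter);  /* device-specific */
                __napi_schedule(&adapter->napi);
        }
        return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_adapter *adapter =
                container_of(napi, struct my_adapter, napi);
        int done = process_rx(adapter, budget);      /* device-specific */

        if (done < budget) {
                napi_complete(napi);
                enable_device_interrupts(adapter);   /* device-specific */
        }
        return done;
}
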
@@ -1071,8 +1203,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1071 IbmVethMcastEnableRecv | 1203 IbmVethMcastEnableRecv |
1072 IbmVethMcastDisableFiltering, 1204 IbmVethMcastDisableFiltering,
1073 0); 1205 0);
1074 if(lpar_rc != H_SUCCESS) { 1206 if (lpar_rc != H_SUCCESS) {
1075 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 1207 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1208 "entering promisc mode\n", lpar_rc);
1076 } 1209 }
1077 } else { 1210 } else {
1078 struct netdev_hw_addr *ha; 1211 struct netdev_hw_addr *ha;
@@ -1082,19 +1215,23 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1082 IbmVethMcastDisableFiltering | 1215 IbmVethMcastDisableFiltering |
1083 IbmVethMcastClearFilterTable, 1216 IbmVethMcastClearFilterTable,
1084 0); 1217 0);
1085 if(lpar_rc != H_SUCCESS) { 1218 if (lpar_rc != H_SUCCESS) {
1086 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 1219 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1220 "attempting to clear filter table\n",
1221 lpar_rc);
1087 } 1222 }
1088 /* add the addresses to the filter table */ 1223 /* add the addresses to the filter table */
1089 netdev_for_each_mc_addr(ha, netdev) { 1224 netdev_for_each_mc_addr(ha, netdev) {
1090 // add the multicast address to the filter table 1225 /* add the multicast address to the filter table */
1091 unsigned long mcast_addr = 0; 1226 unsigned long mcast_addr = 0;
1092 memcpy(((char *)&mcast_addr)+2, ha->addr, 6); 1227 memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
1093 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1228 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1094 IbmVethMcastAddFilter, 1229 IbmVethMcastAddFilter,
1095 mcast_addr); 1230 mcast_addr);
1096 if(lpar_rc != H_SUCCESS) { 1231 if (lpar_rc != H_SUCCESS) {
1097 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); 1232 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1233 "when adding an entry to the filter "
1234 "table\n", lpar_rc);
1098 } 1235 }
1099 } 1236 }
1100 1237
@@ -1102,8 +1239,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1102 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1239 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1103 IbmVethMcastEnableFiltering, 1240 IbmVethMcastEnableFiltering,
1104 0); 1241 0);
1105 if(lpar_rc != H_SUCCESS) { 1242 if (lpar_rc != H_SUCCESS) {
1106 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc); 1243 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1244 "enabling filtering\n", lpar_rc);
1107 } 1245 }
1108 } 1246 }
1109} 1247}
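
The memcpy at offset +2 above packs a 6-byte Ethernet address into the 8-byte unsigned long that h_multicast_ctrl() takes, leaving the two high-order bytes zero on the big-endian POWER hosts this driver runs on. A runnable illustration (note that on a little-endian machine the same copy lands the address in different bits):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* assumes a 64-bit unsigned long, as on ppc64 */
        unsigned char mac[6] = { 0x02, 0x00, 0xca, 0xfe, 0xba, 0xbe };
        unsigned long mcast_addr = 0;

        /* same packing as the driver: bytes 2..7 of the 8-byte word */
        memcpy(((char *)&mcast_addr) + 2, mac, 6);
        printf("packed: 0x%016lx\n", mcast_addr);
        return 0;
}
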
@@ -1113,49 +1251,47 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1113 struct ibmveth_adapter *adapter = netdev_priv(dev); 1251 struct ibmveth_adapter *adapter = netdev_priv(dev);
1114 struct vio_dev *viodev = adapter->vdev; 1252 struct vio_dev *viodev = adapter->vdev;
1115 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; 1253 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1116 int i; 1254 int i, rc;
1255 int need_restart = 0;
1117 1256
1118 if (new_mtu < IBMVETH_MAX_MTU) 1257 if (new_mtu < IBMVETH_MIN_MTU)
1119 return -EINVAL; 1258 return -EINVAL;
1120 1259
1121 for (i = 0; i < IbmVethNumBufferPools; i++) 1260 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1122 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) 1261 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
1123 break; 1262 break;
1124 1263
1125 if (i == IbmVethNumBufferPools) 1264 if (i == IBMVETH_NUM_BUFF_POOLS)
1126 return -EINVAL; 1265 return -EINVAL;
1127 1266
1128 /* Deactivate all the buffer pools so that the next loop can activate 1267 /* Deactivate all the buffer pools so that the next loop can activate
1129 only the buffer pools necessary to hold the new MTU */ 1268 only the buffer pools necessary to hold the new MTU */
1130 for (i = 0; i < IbmVethNumBufferPools; i++) 1269 if (netif_running(adapter->netdev)) {
1131 if (adapter->rx_buff_pool[i].active) { 1270 need_restart = 1;
1132 ibmveth_free_buffer_pool(adapter, 1271 adapter->pool_config = 1;
1133 &adapter->rx_buff_pool[i]); 1272 ibmveth_close(adapter->netdev);
1134 adapter->rx_buff_pool[i].active = 0; 1273 adapter->pool_config = 0;
1135 } 1274 }
1136 1275
1137 /* Look for an active buffer pool that can hold the new MTU */ 1276 /* Look for an active buffer pool that can hold the new MTU */
1138 for(i = 0; i<IbmVethNumBufferPools; i++) { 1277 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1139 adapter->rx_buff_pool[i].active = 1; 1278 adapter->rx_buff_pool[i].active = 1;
1140 1279
1141 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1280 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
1142 if (netif_running(adapter->netdev)) {
1143 adapter->pool_config = 1;
1144 ibmveth_close(adapter->netdev);
1145 adapter->pool_config = 0;
1146 dev->mtu = new_mtu;
1147 vio_cmo_set_dev_desired(viodev,
1148 ibmveth_get_desired_dma
1149 (viodev));
1150 return ibmveth_open(adapter->netdev);
1151 }
1152 dev->mtu = new_mtu; 1281 dev->mtu = new_mtu;
1153 vio_cmo_set_dev_desired(viodev, 1282 vio_cmo_set_dev_desired(viodev,
1154 ibmveth_get_desired_dma 1283 ibmveth_get_desired_dma
1155 (viodev)); 1284 (viodev));
1285 if (need_restart) {
1286 return ibmveth_open(adapter->netdev);
1287 }
1156 return 0; 1288 return 0;
1157 } 1289 }
1158 } 1290 }
1291
1292 if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1293 return rc;
1294
1159 return -EINVAL; 1295 return -EINVAL;
1160} 1296}
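
The reworked ibmveth_change_mtu() closes the interface once up front instead of inside the loop, then walks the pools smallest-first, activating each until it finds one whose buffers can hold the new MTU plus IBMVETH_BUFF_OH bytes of overhead. The search itself reduces to a few lines; a runnable sketch with illustrative pool sizes and an assumed overhead of 22 bytes (14-byte Ethernet header plus an 8-byte handle):

#include <stdio.h>

#define NUM_POOLS 5
#define BUFF_OH   22    /* assumed value of IBMVETH_BUFF_OH */

int main(void)
{
        /* illustrative buffer sizes, smallest first */
        int buff_size[NUM_POOLS] = { 512, 2048, 16384, 32768, 65536 };
        int new_mtu = 9000, i;

        for (i = 0; i < NUM_POOLS; i++) {
                /* the driver marks pool i active here, then tests it */
                if (new_mtu + BUFF_OH < buff_size[i]) {
                        printf("MTU %d fits pool %d (%d bytes)\n",
                               new_mtu, i, buff_size[i]);
                        return 0;
                }
        }
        printf("MTU %d too large for any pool\n", new_mtu);
        return 1;       /* the driver returns -EINVAL here */
}
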
1161 1297
@@ -1192,7 +1328,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1192 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1328 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1193 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1329 ret += IOMMU_PAGE_ALIGN(netdev->mtu);
1194 1330
1195 for (i = 0; i < IbmVethNumBufferPools; i++) { 1331 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1196 /* add the size of the active receive buffers */ 1332 /* add the size of the active receive buffers */
1197 if (adapter->rx_buff_pool[i].active) 1333 if (adapter->rx_buff_pool[i].active)
1198 ret += 1334 ret +=
@@ -1221,41 +1357,36 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1221#endif 1357#endif
1222}; 1358};
1223 1359
1224static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 1360static int __devinit ibmveth_probe(struct vio_dev *dev,
1361 const struct vio_device_id *id)
1225{ 1362{
1226 int rc, i; 1363 int rc, i;
1227 long ret;
1228 struct net_device *netdev; 1364 struct net_device *netdev;
1229 struct ibmveth_adapter *adapter; 1365 struct ibmveth_adapter *adapter;
1230 unsigned long set_attr, ret_attr;
1231
1232 unsigned char *mac_addr_p; 1366 unsigned char *mac_addr_p;
1233 unsigned int *mcastFilterSize_p; 1367 unsigned int *mcastFilterSize_p;
1234 1368
1369 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1370 dev->unit_address);
1235 1371
1236 ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", 1372 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1237 dev->unit_address); 1373 NULL);
1238 1374 if (!mac_addr_p) {
1239 mac_addr_p = (unsigned char *) vio_get_attribute(dev, 1375 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1240 VETH_MAC_ADDR, NULL); 1376 return -EINVAL;
1241 if(!mac_addr_p) {
1242 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
1243 "attribute\n", __FILE__, __LINE__);
1244 return 0;
1245 } 1377 }
1246 1378
1247 mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, 1379 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
1248 VETH_MCAST_FILTER_SIZE, NULL); 1380 VETH_MCAST_FILTER_SIZE, NULL);
1249 if(!mcastFilterSize_p) { 1381 if (!mcastFilterSize_p) {
1250 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " 1382 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1251 "VETH_MCAST_FILTER_SIZE attribute\n", 1383 "attribute\n");
1252 __FILE__, __LINE__); 1384 return -EINVAL;
1253 return 0;
1254 } 1385 }
1255 1386
1256 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); 1387 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1257 1388
1258 if(!netdev) 1389 if (!netdev)
1259 return -ENOMEM; 1390 return -ENOMEM;
1260 1391
1261 adapter = netdev_priv(netdev); 1392 adapter = netdev_priv(netdev);
@@ -1263,19 +1394,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1263 1394
1264 adapter->vdev = dev; 1395 adapter->vdev = dev;
1265 adapter->netdev = netdev; 1396 adapter->netdev = netdev;
1266 adapter->mcastFilterSize= *mcastFilterSize_p; 1397 adapter->mcastFilterSize = *mcastFilterSize_p;
1267 adapter->pool_config = 0; 1398 adapter->pool_config = 0;
1268 1399
1269 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1400 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1270 1401
1271 /* Some older boxes running PHYP non-natively have an OF that 1402 /*
1233 1272 returns an 8-byte local-mac-address field (and the first 1403 * Some older boxes running PHYP non-natively have an OF that returns
1234 1273 2 bytes have to be ignored) while newer boxes' OF return 1404 * an 8-byte local-mac-address field (and the first 2 bytes have to be
1274 a 6-byte field. Note that IEEE 1275 specifies that 1405 * ignored) while newer boxes' OF return a 6-byte field. Note that
1275 local-mac-address must be a 6-byte field. 1406 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
1276 The RPA doc specifies that the first byte must be 10b, so 1407 * The RPA doc specifies that the first byte must be 10b, so we'll
1277 we'll just look for it to solve this 8 vs. 6 byte field issue */ 1408 * just look for it to solve this 8 vs. 6 byte field issue
1278 1409 */
1279 if ((*mac_addr_p & 0x3) != 0x02) 1410 if ((*mac_addr_p & 0x3) != 0x02)
1280 mac_addr_p += 2; 1411 mac_addr_p += 2;
1281 1412
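
The two-line test above resolves the 8-byte versus 6-byte local-mac-address ambiguity the comment describes: a real first byte must end in binary 10 (the pattern the RPA document guarantees), so if it does not, the field is taken to be the padded 8-byte form and the address starts two bytes in. A runnable version of the heuristic:

#include <stdio.h>

/* returns the start of the 6-byte MAC inside an OF local-mac-address
 * property that may be 6 or 8 bytes long */
static const unsigned char *find_mac(const unsigned char *p)
{
        if ((*p & 0x3) != 0x02)     /* low bits not 10b: padded form */
                p += 2;             /* skip the two leading pad bytes */
        return p;
}

int main(void)
{
        const unsigned char eight[8] = { 0, 0, 0x02, 0x11, 0x22,
                                         0x33, 0x44, 0x55 };
        const unsigned char six[6]   = { 0x02, 0x11, 0x22,
                                         0x33, 0x44, 0x55 };

        printf("8-byte form -> first MAC byte 0x%02x\n", *find_mac(eight));
        printf("6-byte form -> first MAC byte 0x%02x\n", *find_mac(six));
        return 0;
}
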
@@ -1286,12 +1417,11 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1286 netdev->netdev_ops = &ibmveth_netdev_ops; 1417 netdev->netdev_ops = &ibmveth_netdev_ops;
1287 netdev->ethtool_ops = &netdev_ethtool_ops; 1418 netdev->ethtool_ops = &netdev_ethtool_ops;
1288 SET_NETDEV_DEV(netdev, &dev->dev); 1419 SET_NETDEV_DEV(netdev, &dev->dev);
1289 netdev->features |= NETIF_F_LLTX; 1420 netdev->features |= NETIF_F_SG;
1290 spin_lock_init(&adapter->stats_lock);
1291 1421
1292 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1422 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1293 1423
1294 for(i = 0; i<IbmVethNumBufferPools; i++) { 1424 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1295 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 1425 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1296 int error; 1426 int error;
1297 1427
@@ -1304,41 +1434,25 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1304 kobject_uevent(kobj, KOBJ_ADD); 1434 kobject_uevent(kobj, KOBJ_ADD);
1305 } 1435 }
1306 1436
1307 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1437 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1308 1438
1309 adapter->buffer_list_dma = DMA_ERROR_CODE; 1439 adapter->buffer_list_dma = DMA_ERROR_CODE;
1310 adapter->filter_list_dma = DMA_ERROR_CODE; 1440 adapter->filter_list_dma = DMA_ERROR_CODE;
1311 adapter->rx_queue.queue_dma = DMA_ERROR_CODE; 1441 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1312 1442
1313 ibmveth_debug_printk("registering netdev...\n"); 1443 netdev_dbg(netdev, "registering netdev...\n");
1314
1315 ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
1316
1317 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
1318 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
1319 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
1320 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
1321
1322 ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
1323 1444
1324 if (ret == H_SUCCESS) { 1445 ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);
1325 adapter->rx_csum = 1;
1326 netdev->features |= NETIF_F_IP_CSUM;
1327 } else
1328 ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
1329 }
1330 1446
1331 rc = register_netdev(netdev); 1447 rc = register_netdev(netdev);
1332 1448
1333 if(rc) { 1449 if (rc) {
1334 ibmveth_debug_printk("failed to register netdev rc=%d\n", rc); 1450 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1335 free_netdev(netdev); 1451 free_netdev(netdev);
1336 return rc; 1452 return rc;
1337 } 1453 }
1338 1454
1339 ibmveth_debug_printk("registered\n"); 1455 netdev_dbg(netdev, "registered\n");
1340
1341 ibmveth_proc_register_adapter(adapter);
1342 1456
1343 return 0; 1457 return 0;
1344} 1458}
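
The h_illan_attributes() sequence deleted from probe above (query the adapter's attributes, check that the partner is not a trunk adapter and supports padded-packet checksumming, then set IBMVETH_ILLAN_IPV4_TCP_CSUM, rolling back on failure) now lives behind ibmveth_set_csum_offload(). A sketch reconstructed from the deleted lines; the real helper, defined earlier in the file, also updates netdev->features and the adapter's rx_csum state:

static int csum_offload_sketch(struct vio_dev *dev)
{
        unsigned long set_attr, ret_attr;
        long ret;

        ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
        if (ret != H_SUCCESS ||
            (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) ||
            (ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) ||
            !(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM))
                return -EIO;

        set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
        ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
        if (ret != H_SUCCESS) {
                /* roll back, as the old probe code did */
                h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
                return -EIO;
        }
        return 0;
}
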
@@ -1349,114 +1463,23 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1349 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1463 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1350 int i; 1464 int i;
1351 1465
1352 for(i = 0; i<IbmVethNumBufferPools; i++) 1466 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1353 kobject_put(&adapter->rx_buff_pool[i].kobj); 1467 kobject_put(&adapter->rx_buff_pool[i].kobj);
1354 1468
1355 unregister_netdev(netdev); 1469 unregister_netdev(netdev);
1356 1470
1357 ibmveth_proc_unregister_adapter(adapter);
1358
1359 free_netdev(netdev); 1471 free_netdev(netdev);
1360 dev_set_drvdata(&dev->dev, NULL); 1472 dev_set_drvdata(&dev->dev, NULL);
1361 1473
1362 return 0; 1474 return 0;
1363} 1475}
1364 1476
1365#ifdef CONFIG_PROC_FS
1366static void ibmveth_proc_register_driver(void)
1367{
1368 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
1369 if (ibmveth_proc_dir) {
1370 }
1371}
1372
1373static void ibmveth_proc_unregister_driver(void)
1374{
1375 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
1376}
1377
1378static int ibmveth_show(struct seq_file *seq, void *v)
1379{
1380 struct ibmveth_adapter *adapter = seq->private;
1381 char *current_mac = (char *) adapter->netdev->dev_addr;
1382 char *firmware_mac = (char *) &adapter->mac_addr;
1383
1384 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1385
1386 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1387 seq_printf(seq, "Current MAC: %pM\n", current_mac);
1388 seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac);
1389
1390 seq_printf(seq, "\nAdapter Statistics:\n");
1341 1391 seq_printf(seq, " TX: vio_map_single failures: %lld\n", adapter->tx_map_failed);
1392 seq_printf(seq, " send failures: %lld\n", adapter->tx_send_failed);
1393 seq_printf(seq, " RX: replenish task cycles: %lld\n", adapter->replenish_task_cycles);
1394 seq_printf(seq, " alloc_skb_failures: %lld\n", adapter->replenish_no_mem);
1395 seq_printf(seq, " add buffer failures: %lld\n", adapter->replenish_add_buff_failure);
1396 seq_printf(seq, " invalid buffers: %lld\n", adapter->rx_invalid_buffer);
1397 seq_printf(seq, " no buffers: %lld\n", adapter->rx_no_buffer);
1398
1399 return 0;
1400}
1401
1402static int ibmveth_proc_open(struct inode *inode, struct file *file)
1403{
1404 return single_open(file, ibmveth_show, PDE(inode)->data);
1405}
1406
1407static const struct file_operations ibmveth_proc_fops = {
1408 .owner = THIS_MODULE,
1409 .open = ibmveth_proc_open,
1410 .read = seq_read,
1411 .llseek = seq_lseek,
1412 .release = single_release,
1413};
1414
1415static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1416{
1417 struct proc_dir_entry *entry;
1418 if (ibmveth_proc_dir) {
1419 char u_addr[10];
1420 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1421 entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
1422 &ibmveth_proc_fops, adapter);
1423 if (!entry)
1424 ibmveth_error_printk("Cannot create adapter proc entry");
1425 }
1426}
1427
1428static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1429{
1430 if (ibmveth_proc_dir) {
1431 char u_addr[10];
1432 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1433 remove_proc_entry(u_addr, ibmveth_proc_dir);
1434 }
1435}
1436
1437#else /* CONFIG_PROC_FS */
1438static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1439{
1440}
1441
1442static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1443{
1444}
1445static void ibmveth_proc_register_driver(void)
1446{
1447}
1448
1449static void ibmveth_proc_unregister_driver(void)
1450{
1451}
1452#endif /* CONFIG_PROC_FS */
1453
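
The procfs code removed above is a textbook use of the single_open()/seq_file idiom: one show callback renders the whole file, and single_open() wires it to the generic read/llseek/release helpers. The idiom in miniature, with placeholder names:

static int my_show(struct seq_file *seq, void *v)
{
        struct my_adapter *adapter = seq->private;

        seq_printf(seq, "tx_map_failed: %lld\n", adapter->tx_map_failed);
        return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_show, PDE(inode)->data);
}

static const struct file_operations my_fops = {
        .owner   = THIS_MODULE,
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
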
1454static struct attribute veth_active_attr; 1477static struct attribute veth_active_attr;
1455static struct attribute veth_num_attr; 1478static struct attribute veth_num_attr;
1456static struct attribute veth_size_attr; 1479static struct attribute veth_size_attr;
1457 1480
1458static ssize_t veth_pool_show(struct kobject * kobj, 1481static ssize_t veth_pool_show(struct kobject *kobj,
1459 struct attribute * attr, char * buf) 1482 struct attribute *attr, char *buf)
1460{ 1483{
1461 struct ibmveth_buff_pool *pool = container_of(kobj, 1484 struct ibmveth_buff_pool *pool = container_of(kobj,
1462 struct ibmveth_buff_pool, 1485 struct ibmveth_buff_pool,
@@ -1471,8 +1494,8 @@ static ssize_t veth_pool_show(struct kobject * kobj,
1471 return 0; 1494 return 0;
1472} 1495}
1473 1496
1474static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, 1497static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1475const char * buf, size_t count) 1498 const char *buf, size_t count)
1476{ 1499{
1477 struct ibmveth_buff_pool *pool = container_of(kobj, 1500 struct ibmveth_buff_pool *pool = container_of(kobj,
1478 struct ibmveth_buff_pool, 1501 struct ibmveth_buff_pool,
@@ -1486,8 +1509,9 @@ const char * buf, size_t count)
1486 if (attr == &veth_active_attr) { 1509 if (attr == &veth_active_attr) {
1487 if (value && !pool->active) { 1510 if (value && !pool->active) {
1488 if (netif_running(netdev)) { 1511 if (netif_running(netdev)) {
1489 if(ibmveth_alloc_buffer_pool(pool)) { 1512 if (ibmveth_alloc_buffer_pool(pool)) {
1490 ibmveth_error_printk("unable to alloc pool\n"); 1513 netdev_err(netdev,
1514 "unable to alloc pool\n");
1491 return -ENOMEM; 1515 return -ENOMEM;
1492 } 1516 }
1493 pool->active = 1; 1517 pool->active = 1;
@@ -1496,14 +1520,15 @@ const char * buf, size_t count)
1496 adapter->pool_config = 0; 1520 adapter->pool_config = 0;
1497 if ((rc = ibmveth_open(netdev))) 1521 if ((rc = ibmveth_open(netdev)))
1498 return rc; 1522 return rc;
1499 } else 1523 } else {
1500 pool->active = 1; 1524 pool->active = 1;
1525 }
1501 } else if (!value && pool->active) { 1526 } else if (!value && pool->active) {
1502 int mtu = netdev->mtu + IBMVETH_BUFF_OH; 1527 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1503 int i; 1528 int i;
1504 /* Make sure there is a buffer pool with buffers that 1529 /* Make sure there is a buffer pool with buffers that
1505 can hold a packet of the size of the MTU */ 1530 can hold a packet of the size of the MTU */
1506 for (i = 0; i < IbmVethNumBufferPools; i++) { 1531 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1507 if (pool == &adapter->rx_buff_pool[i]) 1532 if (pool == &adapter->rx_buff_pool[i])
1508 continue; 1533 continue;
1509 if (!adapter->rx_buff_pool[i].active) 1534 if (!adapter->rx_buff_pool[i].active)
@@ -1512,8 +1537,8 @@ const char * buf, size_t count)
1512 break; 1537 break;
1513 } 1538 }
1514 1539
1515 if (i == IbmVethNumBufferPools) { 1540 if (i == IBMVETH_NUM_BUFF_POOLS) {
1516 ibmveth_error_printk("no active pool >= MTU\n"); 1541 netdev_err(netdev, "no active pool >= MTU\n");
1517 return -EPERM; 1542 return -EPERM;
1518 } 1543 }
1519 1544
@@ -1528,9 +1553,9 @@ const char * buf, size_t count)
1528 pool->active = 0; 1553 pool->active = 0;
1529 } 1554 }
1530 } else if (attr == &veth_num_attr) { 1555 } else if (attr == &veth_num_attr) {
1531 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) 1556 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1532 return -EINVAL; 1557 return -EINVAL;
1533 else { 1558 } else {
1534 if (netif_running(netdev)) { 1559 if (netif_running(netdev)) {
1535 adapter->pool_config = 1; 1560 adapter->pool_config = 1;
1536 ibmveth_close(netdev); 1561 ibmveth_close(netdev);
@@ -1538,13 +1563,14 @@ const char * buf, size_t count)
1538 pool->size = value; 1563 pool->size = value;
1539 if ((rc = ibmveth_open(netdev))) 1564 if ((rc = ibmveth_open(netdev)))
1540 return rc; 1565 return rc;
1541 } else 1566 } else {
1542 pool->size = value; 1567 pool->size = value;
1568 }
1543 } 1569 }
1544 } else if (attr == &veth_size_attr) { 1570 } else if (attr == &veth_size_attr) {
1545 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) 1571 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1546 return -EINVAL; 1572 return -EINVAL;
1547 else { 1573 } else {
1548 if (netif_running(netdev)) { 1574 if (netif_running(netdev)) {
1549 adapter->pool_config = 1; 1575 adapter->pool_config = 1;
1550 ibmveth_close(netdev); 1576 ibmveth_close(netdev);
@@ -1552,8 +1578,9 @@ const char * buf, size_t count)
1552 pool->buff_size = value; 1578 pool->buff_size = value;
1553 if ((rc = ibmveth_open(netdev))) 1579 if ((rc = ibmveth_open(netdev)))
1554 return rc; 1580 return rc;
1555 } else 1581 } else {
1556 pool->buff_size = value; 1582 pool->buff_size = value;
1583 }
1557 } 1584 }
1558 } 1585 }
1559 1586
@@ -1563,16 +1590,16 @@ const char * buf, size_t count)
1563} 1590}
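
Each branch of veth_pool_store() above follows the same shape: validate the parsed value, and if the interface is up, quiesce it (with pool_config set so the close path skips pool teardown), apply the change, and reopen. One branch in isolation, as a sketch; my_pool, my_open, my_close, and MAX_POOL_COUNT stand in for the driver's names, and the parsing mirrors the simple_strtol() style of kernels of this vintage:

static ssize_t pool_num_store_sketch(struct net_device *netdev,
                                     struct my_pool *pool,
                                     const char *buf, size_t count)
{
        long value = simple_strtol(buf, NULL, 10);
        int rc;

        if (value <= 0 || value > MAX_POOL_COUNT)
                return -EINVAL;

        if (netif_running(netdev)) {
                /* quiesce, apply, restart */
                my_close(netdev);
                pool->size = value;
                rc = my_open(netdev);
                if (rc)
                        return rc;
        } else {
                pool->size = value;
        }
        return count;
}
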
1564 1591
1565 1592
1566#define ATTR(_name, _mode) \ 1593#define ATTR(_name, _mode) \
1567 struct attribute veth_##_name##_attr = { \ 1594 struct attribute veth_##_name##_attr = { \
1568 .name = __stringify(_name), .mode = _mode, \ 1595 .name = __stringify(_name), .mode = _mode, \
1569 }; 1596 };
1570 1597
1571static ATTR(active, 0644); 1598static ATTR(active, 0644);
1572static ATTR(num, 0644); 1599static ATTR(num, 0644);
1573static ATTR(size, 0644); 1600static ATTR(size, 0644);
1574 1601
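
The ATTR() macro stamps out bare struct attribute objects for the pool sysfs files; after __stringify() does its work, static ATTR(active, 0644); expands to:

static struct attribute veth_active_attr = {
        .name = "active", .mode = 0644,
};
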
1575static struct attribute * veth_pool_attrs[] = { 1602static struct attribute *veth_pool_attrs[] = {
1576 &veth_active_attr, 1603 &veth_active_attr,
1577 &veth_num_attr, 1604 &veth_num_attr,
1578 &veth_size_attr, 1605 &veth_size_attr,
@@ -1597,7 +1624,7 @@ static int ibmveth_resume(struct device *dev)
1597 return 0; 1624 return 0;
1598} 1625}
1599 1626
1600static struct vio_device_id ibmveth_device_table[] __devinitdata= { 1627static struct vio_device_id ibmveth_device_table[] __devinitdata = {
1601 { "network", "IBM,l-lan"}, 1628 { "network", "IBM,l-lan"},
1602 { "", "" } 1629 { "", "" }
1603}; 1630};
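
This ID table matches Open Firmware device nodes of type "network" with a compatible string of "IBM,l-lan". Such tables are normally also exported for module autoloading; whether ibmveth.c carries this exact line is outside the hunk shown here:

MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
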
@@ -1621,9 +1648,8 @@ static struct vio_driver ibmveth_driver = {
1621 1648
1622static int __init ibmveth_module_init(void) 1649static int __init ibmveth_module_init(void)
1623{ 1650{
1624 ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version); 1651 printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1625 1652 ibmveth_driver_string, ibmveth_driver_version);
1626 ibmveth_proc_register_driver();
1627 1653
1628 return vio_register_driver(&ibmveth_driver); 1654 return vio_register_driver(&ibmveth_driver);
1629} 1655}
@@ -1631,7 +1657,6 @@ static int __init ibmveth_module_init(void)
1631static void __exit ibmveth_module_exit(void) 1657static void __exit ibmveth_module_exit(void)
1632{ 1658{
1633 vio_unregister_driver(&ibmveth_driver); 1659 vio_unregister_driver(&ibmveth_driver);
1634 ibmveth_proc_unregister_driver();
1635} 1660}
1636 1661
1637module_init(ibmveth_module_init); 1662module_init(ibmveth_module_init);