Diffstat (limited to 'drivers/net/ibmveth.c')
 drivers/net/ibmveth.c | 953 ++++++++++++++++++++++++-------------------------
 1 file changed, 491 insertions(+), 462 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4734c939ad03..b3e157ed6776 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,122 +1,84 @@ | |||
1 | /**************************************************************************/ | ||
2 | /* */ | ||
3 | /* IBM eServer i/pSeries Virtual Ethernet Device Driver */ | ||
4 | /* Copyright (C) 2003 IBM Corp. */ | ||
5 | /* Originally written by Dave Larson (larson1@us.ibm.com) */ | ||
6 | /* Maintained by Santiago Leon (santil@us.ibm.com) */ | ||
7 | /* */ | ||
8 | /* This program is free software; you can redistribute it and/or modify */ | ||
9 | /* it under the terms of the GNU General Public License as published by */ | ||
10 | /* the Free Software Foundation; either version 2 of the License, or */ | ||
11 | /* (at your option) any later version. */ | ||
12 | /* */ | ||
13 | /* This program is distributed in the hope that it will be useful, */ | ||
14 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | ||
15 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | ||
16 | /* GNU General Public License for more details. */ | ||
17 | /* */ | ||
18 | /* You should have received a copy of the GNU General Public License */ | ||
19 | /* along with this program; if not, write to the Free Software */ | ||
20 | /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */ | ||
21 | /* USA */ | ||
22 | /* */ | ||
23 | /* This module contains the implementation of a virtual ethernet device */ | ||
24 | /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */ | ||
25 | /* option of the RS/6000 Platform Architechture to interface with virtual */ | ||
26 | /* ethernet NICs that are presented to the partition by the hypervisor. */ | ||
27 | /* */ | ||
28 | /**************************************************************************/ | ||
29 | /* | 1 | /* |
30 | TODO: | 2 | * IBM Power Virtual Ethernet Device Driver |
31 | - add support for sysfs | 3 | * |
32 | - possibly remove procfs support | 4 | * This program is free software; you can redistribute it and/or modify |
33 | */ | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2003, 2010 | ||
19 | * | ||
20 | * Authors: Dave Larson <larson1@us.ibm.com> | ||
21 | * Santiago Leon <santil@linux.vnet.ibm.com> | ||
22 | * Brian King <brking@linux.vnet.ibm.com> | ||
23 | * Robert Jennings <rcj@linux.vnet.ibm.com> | ||
24 | * Anton Blanchard <anton@au.ibm.com> | ||
25 | */ | ||
34 | 26 | ||
35 | #include <linux/module.h> | 27 | #include <linux/module.h> |
36 | #include <linux/moduleparam.h> | 28 | #include <linux/moduleparam.h> |
37 | #include <linux/types.h> | 29 | #include <linux/types.h> |
38 | #include <linux/errno.h> | 30 | #include <linux/errno.h> |
39 | #include <linux/ioport.h> | ||
40 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
41 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
42 | #include <linux/netdevice.h> | 33 | #include <linux/netdevice.h> |
43 | #include <linux/etherdevice.h> | 34 | #include <linux/etherdevice.h> |
44 | #include <linux/skbuff.h> | 35 | #include <linux/skbuff.h> |
45 | #include <linux/init.h> | 36 | #include <linux/init.h> |
46 | #include <linux/delay.h> | ||
47 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
48 | #include <linux/pm.h> | 38 | #include <linux/pm.h> |
49 | #include <linux/ethtool.h> | 39 | #include <linux/ethtool.h> |
50 | #include <linux/proc_fs.h> | ||
51 | #include <linux/in.h> | 40 | #include <linux/in.h> |
52 | #include <linux/ip.h> | 41 | #include <linux/ip.h> |
42 | #include <linux/ipv6.h> | ||
53 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
54 | #include <net/net_namespace.h> | ||
55 | #include <asm/hvcall.h> | 44 | #include <asm/hvcall.h> |
56 | #include <asm/atomic.h> | 45 | #include <asm/atomic.h> |
57 | #include <asm/vio.h> | 46 | #include <asm/vio.h> |
58 | #include <asm/iommu.h> | 47 | #include <asm/iommu.h> |
59 | #include <asm/uaccess.h> | ||
60 | #include <asm/firmware.h> | 48 | #include <asm/firmware.h> |
61 | #include <linux/seq_file.h> | ||
62 | 49 | ||
63 | #include "ibmveth.h" | 50 | #include "ibmveth.h" |
64 | 51 | ||
65 | #undef DEBUG | ||
66 | |||
67 | #define ibmveth_printk(fmt, args...) \ | ||
68 | printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args) | ||
69 | |||
70 | #define ibmveth_error_printk(fmt, args...) \ | ||
71 | printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args) | ||
72 | |||
73 | #ifdef DEBUG | ||
74 | #define ibmveth_debug_printk_no_adapter(fmt, args...) \ | ||
75 | printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args) | ||
76 | #define ibmveth_debug_printk(fmt, args...) \ | ||
77 | printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args) | ||
78 | #define ibmveth_assert(expr) \ | ||
79 | if(!(expr)) { \ | ||
80 | printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \ | ||
81 | BUG(); \ | ||
82 | } | ||
83 | #else | ||
84 | #define ibmveth_debug_printk_no_adapter(fmt, args...) | ||
85 | #define ibmveth_debug_printk(fmt, args...) | ||
86 | #define ibmveth_assert(expr) | ||
87 | #endif | ||
88 | |||
89 | static int ibmveth_open(struct net_device *dev); | ||
90 | static int ibmveth_close(struct net_device *dev); | ||
91 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | ||
92 | static int ibmveth_poll(struct napi_struct *napi, int budget); | ||
93 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
94 | static void ibmveth_set_multicast_list(struct net_device *dev); | ||
95 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu); | ||
96 | static void ibmveth_proc_register_driver(void); | ||
97 | static void ibmveth_proc_unregister_driver(void); | ||
98 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); | ||
99 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); | ||
100 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); | 52 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); |
101 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); | 53 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); |
102 | static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); | 54 | static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); |
103 | static struct kobj_type ktype_veth_pool; | ||
104 | 55 | ||
56 | static struct kobj_type ktype_veth_pool; | ||
105 | 57 | ||
106 | #ifdef CONFIG_PROC_FS | ||
107 | #define IBMVETH_PROC_DIR "ibmveth" | ||
108 | static struct proc_dir_entry *ibmveth_proc_dir; | ||
109 | #endif | ||
110 | 58 | ||
111 | static const char ibmveth_driver_name[] = "ibmveth"; | 59 | static const char ibmveth_driver_name[] = "ibmveth"; |
112 | static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver"; | 60 | static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver"; |
113 | #define ibmveth_driver_version "1.03" | 61 | #define ibmveth_driver_version "1.04" |
114 | 62 | ||
115 | MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>"); | 63 | MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>"); |
116 | MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver"); | 64 | MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver"); |
117 | MODULE_LICENSE("GPL"); | 65 | MODULE_LICENSE("GPL"); |
118 | MODULE_VERSION(ibmveth_driver_version); | 66 | MODULE_VERSION(ibmveth_driver_version); |
119 | 67 | ||
68 | static unsigned int tx_copybreak __read_mostly = 128; | ||
69 | module_param(tx_copybreak, uint, 0644); | ||
70 | MODULE_PARM_DESC(tx_copybreak, | ||
71 | "Maximum size of packet that is copied to a new buffer on transmit"); | ||
72 | |||
73 | static unsigned int rx_copybreak __read_mostly = 128; | ||
74 | module_param(rx_copybreak, uint, 0644); | ||
75 | MODULE_PARM_DESC(rx_copybreak, | ||
76 | "Maximum size of packet that is copied to a new buffer on receive"); | ||
77 | |||
78 | static unsigned int rx_flush __read_mostly = 0; | ||
79 | module_param(rx_flush, uint, 0644); | ||
80 | MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); | ||
81 | |||
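
[Note on the new parameters: tx_copybreak and rx_copybreak set the packet size below which data is copied into an already-mapped buffer instead of being DMA-mapped in place; on this platform every map/unmap is a hypervisor TCE update, so copying small packets is cheaper. rx_flush optionally flushes receive buffers from the data cache before they go to firmware. A minimal sketch of the copybreak decision, as the transmit path applies it later in this patch (hypothetical helper name):

static bool ibmveth_should_bounce(const struct sk_buff *skb,
				  unsigned int copybreak)
{
	/* only a linear skb fits the single bounce buffer; below the
	 * threshold the memcpy is cheaper than a TCE insert and remove
	 */
	return !skb_is_nonlinear(skb) && skb->len < copybreak;
}
]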
120 | struct ibmveth_stat { | 82 | struct ibmveth_stat { |
121 | char name[ETH_GSTRING_LEN]; | 83 | char name[ETH_GSTRING_LEN]; |
122 | int offset; | 84 | int offset; |
@@ -128,12 +90,16 @@ struct ibmveth_stat { | |||
128 | struct ibmveth_stat ibmveth_stats[] = { | 90 | struct ibmveth_stat ibmveth_stats[] = { |
129 | { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, | 91 | { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, |
130 | { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, | 92 | { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, |
131 | { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) }, | 93 | { "replenish_add_buff_failure", |
132 | { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) }, | 94 | IBMVETH_STAT_OFF(replenish_add_buff_failure) }, |
95 | { "replenish_add_buff_success", | ||
96 | IBMVETH_STAT_OFF(replenish_add_buff_success) }, | ||
133 | { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, | 97 | { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, |
134 | { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, | 98 | { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, |
135 | { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, | 99 | { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, |
136 | { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, | 100 | { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, |
101 | { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) }, | ||
102 | { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) }, | ||
137 | }; | 103 | }; |
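
[Note: each entry above pairs an ethtool string with a byte offset into struct ibmveth_adapter. In this file IBMVETH_STAT_OFF is an offsetof() wrapper, which lets the stats callback walk the table generically; roughly (sketch, hypothetical function name):

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

static void ibmveth_fill_stats(struct ibmveth_adapter *adapter, u64 *data)
{
	int i;

	/* one u64 per table entry, read straight out of the adapter */
	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
]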
138 | 104 | ||
139 | /* simple methods of getting data from the current rxq entry */ | 105 | /* simple methods of getting data from the current rxq entry */ |
@@ -144,41 +110,44 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter) | |||
144 | 110 | ||
145 | static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) | 111 | static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) |
146 | { | 112 | { |
147 | return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT; | 113 | return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> |
114 | IBMVETH_RXQ_TOGGLE_SHIFT; | ||
148 | } | 115 | } |
149 | 116 | ||
150 | static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) | 117 | static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) |
151 | { | 118 | { |
152 | return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle); | 119 | return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle; |
153 | } | 120 | } |
154 | 121 | ||
155 | static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) | 122 | static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) |
156 | { | 123 | { |
157 | return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID); | 124 | return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID; |
158 | } | 125 | } |
159 | 126 | ||
160 | static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) | 127 | static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) |
161 | { | 128 | { |
162 | return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK); | 129 | return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK; |
163 | } | 130 | } |
164 | 131 | ||
165 | static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) | 132 | static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) |
166 | { | 133 | { |
167 | return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); | 134 | return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length; |
168 | } | 135 | } |
169 | 136 | ||
170 | static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) | 137 | static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) |
171 | { | 138 | { |
172 | return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD); | 139 | return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD; |
173 | } | 140 | } |
174 | 141 | ||
175 | /* setup the initial settings for a buffer pool */ | 142 | /* setup the initial settings for a buffer pool */ |
176 | static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) | 143 | static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, |
144 | u32 pool_index, u32 pool_size, | ||
145 | u32 buff_size, u32 pool_active) | ||
177 | { | 146 | { |
178 | pool->size = pool_size; | 147 | pool->size = pool_size; |
179 | pool->index = pool_index; | 148 | pool->index = pool_index; |
180 | pool->buff_size = buff_size; | 149 | pool->buff_size = buff_size; |
181 | pool->threshold = pool_size / 2; | 150 | pool->threshold = pool_size * 7 / 8; |
182 | pool->active = pool_active; | 151 | pool->active = pool_active; |
183 | } | 152 | } |
184 | 153 | ||
@@ -189,12 +158,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |||
189 | 158 | ||
190 | pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); | 159 | pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); |
191 | 160 | ||
192 | if(!pool->free_map) { | 161 | if (!pool->free_map) |
193 | return -1; | 162 | return -1; |
194 | } | ||
195 | 163 | ||
196 | pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); | 164 | pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); |
197 | if(!pool->dma_addr) { | 165 | if (!pool->dma_addr) { |
198 | kfree(pool->free_map); | 166 | kfree(pool->free_map); |
199 | pool->free_map = NULL; | 167 | pool->free_map = NULL; |
200 | return -1; | 168 | return -1; |
@@ -202,7 +170,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |||
202 | 170 | ||
203 | pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); | 171 | pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); |
204 | 172 | ||
205 | if(!pool->skbuff) { | 173 | if (!pool->skbuff) { |
206 | kfree(pool->dma_addr); | 174 | kfree(pool->dma_addr); |
207 | pool->dma_addr = NULL; | 175 | pool->dma_addr = NULL; |
208 | 176 | ||
@@ -213,9 +181,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |||
213 | 181 | ||
214 | memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); | 182 | memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); |
215 | 183 | ||
216 | for(i = 0; i < pool->size; ++i) { | 184 | for (i = 0; i < pool->size; ++i) |
217 | pool->free_map[i] = i; | 185 | pool->free_map[i] = i; |
218 | } | ||
219 | 186 | ||
220 | atomic_set(&pool->available, 0); | 187 | atomic_set(&pool->available, 0); |
221 | pool->producer_index = 0; | 188 | pool->producer_index = 0; |
@@ -224,10 +191,19 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |||
224 | return 0; | 191 | return 0; |
225 | } | 192 | } |
226 | 193 | ||
194 | static inline void ibmveth_flush_buffer(void *addr, unsigned long length) | ||
195 | { | ||
196 | unsigned long offset; | ||
197 | |||
198 | for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) | ||
199 | asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); | ||
200 | } | ||
201 | |||
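
[Note: ibmveth_flush_buffer() pushes a range out of the data cache one cache block at a time. dcbfl is the POWER data-cache-block-flush instruction, the "b"/"r" constraints form a base+index effective address, and stepping the offset by SMP_CACHE_BYTES covers the whole range. An annotated equivalent (sketch; an asm with no outputs, as above, is already implicitly volatile):

static inline void ibmveth_flush_buffer_sketch(void *addr,
					       unsigned long length)
{
	unsigned long offset;

	/* flush the cache block containing addr + offset, one block
	 * per SMP_CACHE_BYTES, over [addr, addr + length)
	 */
	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm volatile("dcbfl %0,%1" : : "b" (addr), "r" (offset));
}
]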
227 | /* replenish the buffers for a pool. note that we don't need to | 202 | /* replenish the buffers for a pool. note that we don't need to |
228 | * skb_reserve these since they are used for incoming... | 203 | * skb_reserve these since they are used for incoming... |
229 | */ | 204 | */ |
230 | static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) | 205 | static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, |
206 | struct ibmveth_buff_pool *pool) | ||
231 | { | 207 | { |
232 | u32 i; | 208 | u32 i; |
233 | u32 count = pool->size - atomic_read(&pool->available); | 209 | u32 count = pool->size - atomic_read(&pool->available); |
@@ -240,23 +216,26 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
240 | 216 | ||
241 | mb(); | 217 | mb(); |
242 | 218 | ||
243 | for(i = 0; i < count; ++i) { | 219 | for (i = 0; i < count; ++i) { |
244 | union ibmveth_buf_desc desc; | 220 | union ibmveth_buf_desc desc; |
245 | 221 | ||
246 | skb = alloc_skb(pool->buff_size, GFP_ATOMIC); | 222 | skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); |
247 | 223 | ||
248 | if(!skb) { | 224 | if (!skb) { |
249 | ibmveth_debug_printk("replenish: unable to allocate skb\n"); | 225 | netdev_dbg(adapter->netdev, |
226 | "replenish: unable to allocate skb\n"); | ||
250 | adapter->replenish_no_mem++; | 227 | adapter->replenish_no_mem++; |
251 | break; | 228 | break; |
252 | } | 229 | } |
253 | 230 | ||
254 | free_index = pool->consumer_index; | 231 | free_index = pool->consumer_index; |
255 | pool->consumer_index = (pool->consumer_index + 1) % pool->size; | 232 | pool->consumer_index++; |
233 | if (pool->consumer_index >= pool->size) | ||
234 | pool->consumer_index = 0; | ||
256 | index = pool->free_map[free_index]; | 235 | index = pool->free_map[free_index]; |
257 | 236 | ||
258 | ibmveth_assert(index != IBM_VETH_INVALID_MAP); | 237 | BUG_ON(index == IBM_VETH_INVALID_MAP); |
259 | ibmveth_assert(pool->skbuff[index] == NULL); | 238 | BUG_ON(pool->skbuff[index] != NULL); |
260 | 239 | ||
261 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | 240 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
262 | pool->buff_size, DMA_FROM_DEVICE); | 241 | pool->buff_size, DMA_FROM_DEVICE); |
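
[Note on the index arithmetic above: the pool's ring indices were advanced with "% pool->size", an integer division on every buffer for the driver's non-power-of-two pool sizes; the patch switches to an increment-and-wrap. The idiom in isolation (sketch):

static inline u32 ibmveth_ring_next(u32 index, u32 size)
{
	/* cheaper than "(index + 1) % size" for arbitrary sizes */
	if (++index >= size)
		index = 0;
	return index;
}
]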
@@ -269,16 +248,23 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
269 | pool->skbuff[index] = skb; | 248 | pool->skbuff[index] = skb; |
270 | 249 | ||
271 | correlator = ((u64)pool->index << 32) | index; | 250 | correlator = ((u64)pool->index << 32) | index; |
272 | *(u64*)skb->data = correlator; | 251 | *(u64 *)skb->data = correlator; |
273 | 252 | ||
274 | desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; | 253 | desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; |
275 | desc.fields.address = dma_addr; | 254 | desc.fields.address = dma_addr; |
276 | 255 | ||
277 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | 256 | if (rx_flush) { |
257 | unsigned int len = min(pool->buff_size, | ||
258 | adapter->netdev->mtu + | ||
259 | IBMVETH_BUFF_OH); | ||
260 | ibmveth_flush_buffer(skb->data, len); | ||
261 | } | ||
262 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, | ||
263 | desc.desc); | ||
278 | 264 | ||
279 | if (lpar_rc != H_SUCCESS) | 265 | if (lpar_rc != H_SUCCESS) { |
280 | goto failure; | 266 | goto failure; |
281 | else { | 267 | } else { |
282 | buffers_added++; | 268 | buffers_added++; |
283 | adapter->replenish_add_buff_success++; | 269 | adapter->replenish_add_buff_success++; |
284 | } | 270 | } |
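
[Note: the correlator written into the first 8 bytes of every posted buffer packs the pool number into the upper 32 bits and the slot index into the lower 32, so a completion can be routed back to its slot without any lookup. The decode, as ibmveth_remove_buffer_from_pool() performs it further down (sketch, hypothetical helper name):

static void ibmveth_decode_correlator(u64 correlator, unsigned int *pool,
				      unsigned int *index)
{
	*pool = correlator >> 32;           /* which buffer pool */
	*index = correlator & 0xffffffffUL; /* slot within it    */
}
]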
@@ -313,26 +299,31 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |||
313 | 299 | ||
314 | adapter->replenish_task_cycles++; | 300 | adapter->replenish_task_cycles++; |
315 | 301 | ||
316 | for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) | 302 | for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) { |
317 | if(adapter->rx_buff_pool[i].active) | 303 | struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; |
318 | ibmveth_replenish_buffer_pool(adapter, | ||
319 | &adapter->rx_buff_pool[i]); | ||
320 | 304 | ||
321 | adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); | 305 | if (pool->active && |
306 | (atomic_read(&pool->available) < pool->threshold)) | ||
307 | ibmveth_replenish_buffer_pool(adapter, pool); | ||
308 | } | ||
309 | |||
310 | adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) + | ||
311 | 4096 - 8); | ||
322 | } | 312 | } |
323 | 313 | ||
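
[Note: two details of the replenish rework above. First, a pool is now topped up only after it drains below its threshold, which is why ibmveth_init_buffer_pool() earlier raises the threshold from half to 7/8 of the pool so pools still ride nearly full. Second, rx_no_buffer is re-read from the last 8 bytes of the 4K buffer-list page, where the firmware keeps its count of frames dropped for want of a buffer. The gating test as a standalone predicate (sketch):

static bool ibmveth_pool_needs_replenish(struct ibmveth_buff_pool *pool)
{
	return pool->active &&
	       atomic_read(&pool->available) < pool->threshold;
}
]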
324 | /* empty and free ana buffer pool - also used to do cleanup in error paths */ | 314 | /* empty and free ana buffer pool - also used to do cleanup in error paths */ |
325 | static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) | 315 | static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, |
316 | struct ibmveth_buff_pool *pool) | ||
326 | { | 317 | { |
327 | int i; | 318 | int i; |
328 | 319 | ||
329 | kfree(pool->free_map); | 320 | kfree(pool->free_map); |
330 | pool->free_map = NULL; | 321 | pool->free_map = NULL; |
331 | 322 | ||
332 | if(pool->skbuff && pool->dma_addr) { | 323 | if (pool->skbuff && pool->dma_addr) { |
333 | for(i = 0; i < pool->size; ++i) { | 324 | for (i = 0; i < pool->size; ++i) { |
334 | struct sk_buff *skb = pool->skbuff[i]; | 325 | struct sk_buff *skb = pool->skbuff[i]; |
335 | if(skb) { | 326 | if (skb) { |
336 | dma_unmap_single(&adapter->vdev->dev, | 327 | dma_unmap_single(&adapter->vdev->dev, |
337 | pool->dma_addr[i], | 328 | pool->dma_addr[i], |
338 | pool->buff_size, | 329 | pool->buff_size, |
@@ -343,31 +334,32 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm | |||
343 | } | 334 | } |
344 | } | 335 | } |
345 | 336 | ||
346 | if(pool->dma_addr) { | 337 | if (pool->dma_addr) { |
347 | kfree(pool->dma_addr); | 338 | kfree(pool->dma_addr); |
348 | pool->dma_addr = NULL; | 339 | pool->dma_addr = NULL; |
349 | } | 340 | } |
350 | 341 | ||
351 | if(pool->skbuff) { | 342 | if (pool->skbuff) { |
352 | kfree(pool->skbuff); | 343 | kfree(pool->skbuff); |
353 | pool->skbuff = NULL; | 344 | pool->skbuff = NULL; |
354 | } | 345 | } |
355 | } | 346 | } |
356 | 347 | ||
357 | /* remove a buffer from a pool */ | 348 | /* remove a buffer from a pool */ |
358 | static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator) | 349 | static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, |
350 | u64 correlator) | ||
359 | { | 351 | { |
360 | unsigned int pool = correlator >> 32; | 352 | unsigned int pool = correlator >> 32; |
361 | unsigned int index = correlator & 0xffffffffUL; | 353 | unsigned int index = correlator & 0xffffffffUL; |
362 | unsigned int free_index; | 354 | unsigned int free_index; |
363 | struct sk_buff *skb; | 355 | struct sk_buff *skb; |
364 | 356 | ||
365 | ibmveth_assert(pool < IbmVethNumBufferPools); | 357 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); |
366 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | 358 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); |
367 | 359 | ||
368 | skb = adapter->rx_buff_pool[pool].skbuff[index]; | 360 | skb = adapter->rx_buff_pool[pool].skbuff[index]; |
369 | 361 | ||
370 | ibmveth_assert(skb != NULL); | 362 | BUG_ON(skb == NULL); |
371 | 363 | ||
372 | adapter->rx_buff_pool[pool].skbuff[index] = NULL; | 364 | adapter->rx_buff_pool[pool].skbuff[index] = NULL; |
373 | 365 | ||
@@ -377,9 +369,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 | |||
377 | DMA_FROM_DEVICE); | 369 | DMA_FROM_DEVICE); |
378 | 370 | ||
379 | free_index = adapter->rx_buff_pool[pool].producer_index; | 371 | free_index = adapter->rx_buff_pool[pool].producer_index; |
380 | adapter->rx_buff_pool[pool].producer_index | 372 | adapter->rx_buff_pool[pool].producer_index++; |
381 | = (adapter->rx_buff_pool[pool].producer_index + 1) | 373 | if (adapter->rx_buff_pool[pool].producer_index >= |
382 | % adapter->rx_buff_pool[pool].size; | 374 | adapter->rx_buff_pool[pool].size) |
375 | adapter->rx_buff_pool[pool].producer_index = 0; | ||
383 | adapter->rx_buff_pool[pool].free_map[free_index] = index; | 376 | adapter->rx_buff_pool[pool].free_map[free_index] = index; |
384 | 377 | ||
385 | mb(); | 378 | mb(); |
@@ -394,8 +387,8 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada | |||
394 | unsigned int pool = correlator >> 32; | 387 | unsigned int pool = correlator >> 32; |
395 | unsigned int index = correlator & 0xffffffffUL; | 388 | unsigned int index = correlator & 0xffffffffUL; |
396 | 389 | ||
397 | ibmveth_assert(pool < IbmVethNumBufferPools); | 390 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); |
398 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | 391 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); |
399 | 392 | ||
400 | return adapter->rx_buff_pool[pool].skbuff[index]; | 393 | return adapter->rx_buff_pool[pool].skbuff[index]; |
401 | } | 394 | } |
@@ -410,10 +403,10 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
410 | union ibmveth_buf_desc desc; | 403 | union ibmveth_buf_desc desc; |
411 | unsigned long lpar_rc; | 404 | unsigned long lpar_rc; |
412 | 405 | ||
413 | ibmveth_assert(pool < IbmVethNumBufferPools); | 406 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); |
414 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | 407 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); |
415 | 408 | ||
416 | if(!adapter->rx_buff_pool[pool].active) { | 409 | if (!adapter->rx_buff_pool[pool].active) { |
417 | ibmveth_rxq_harvest_buffer(adapter); | 410 | ibmveth_rxq_harvest_buffer(adapter); |
418 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); | 411 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); |
419 | return; | 412 | return; |
@@ -425,12 +418,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
425 | 418 | ||
426 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | 419 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); |
427 | 420 | ||
428 | if(lpar_rc != H_SUCCESS) { | 421 | if (lpar_rc != H_SUCCESS) { |
429 | ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc); | 422 | netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " |
423 | "during recycle rc=%ld", lpar_rc); | ||
430 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | 424 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); |
431 | } | 425 | } |
432 | 426 | ||
433 | if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { | 427 | if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { |
434 | adapter->rx_queue.index = 0; | 428 | adapter->rx_queue.index = 0; |
435 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; | 429 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; |
436 | } | 430 | } |
@@ -440,7 +434,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | |||
440 | { | 434 | { |
441 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | 435 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); |
442 | 436 | ||
443 | if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { | 437 | if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { |
444 | adapter->rx_queue.index = 0; | 438 | adapter->rx_queue.index = 0; |
445 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; | 439 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; |
446 | } | 440 | } |
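
[Note: the two helpers above share a classic toggle-bit ring protocol. The driver walks the receive queue in order and flips its local toggle on every wrap; an entry belongs to the driver when the TOGGLE flag firmware wrote into it matches the driver's current value (that comparison is ibmveth_rxq_pending_buffer() near the top of the file). The shared advance step (sketch):

static inline void ibmveth_rxq_advance(struct ibmveth_adapter *adapter)
{
	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
]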
@@ -451,7 +445,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
451 | int i; | 445 | int i; |
452 | struct device *dev = &adapter->vdev->dev; | 446 | struct device *dev = &adapter->vdev->dev; |
453 | 447 | ||
454 | if(adapter->buffer_list_addr != NULL) { | 448 | if (adapter->buffer_list_addr != NULL) { |
455 | if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { | 449 | if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { |
456 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, | 450 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, |
457 | DMA_BIDIRECTIONAL); | 451 | DMA_BIDIRECTIONAL); |
@@ -461,7 +455,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
461 | adapter->buffer_list_addr = NULL; | 455 | adapter->buffer_list_addr = NULL; |
462 | } | 456 | } |
463 | 457 | ||
464 | if(adapter->filter_list_addr != NULL) { | 458 | if (adapter->filter_list_addr != NULL) { |
465 | if (!dma_mapping_error(dev, adapter->filter_list_dma)) { | 459 | if (!dma_mapping_error(dev, adapter->filter_list_dma)) { |
466 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, | 460 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, |
467 | DMA_BIDIRECTIONAL); | 461 | DMA_BIDIRECTIONAL); |
@@ -471,7 +465,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
471 | adapter->filter_list_addr = NULL; | 465 | adapter->filter_list_addr = NULL; |
472 | } | 466 | } |
473 | 467 | ||
474 | if(adapter->rx_queue.queue_addr != NULL) { | 468 | if (adapter->rx_queue.queue_addr != NULL) { |
475 | if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { | 469 | if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { |
476 | dma_unmap_single(dev, | 470 | dma_unmap_single(dev, |
477 | adapter->rx_queue.queue_dma, | 471 | adapter->rx_queue.queue_dma, |
@@ -483,7 +477,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
483 | adapter->rx_queue.queue_addr = NULL; | 477 | adapter->rx_queue.queue_addr = NULL; |
484 | } | 478 | } |
485 | 479 | ||
486 | for(i = 0; i<IbmVethNumBufferPools; i++) | 480 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
487 | if (adapter->rx_buff_pool[i].active) | 481 | if (adapter->rx_buff_pool[i].active) |
488 | ibmveth_free_buffer_pool(adapter, | 482 | ibmveth_free_buffer_pool(adapter, |
489 | &adapter->rx_buff_pool[i]); | 483 | &adapter->rx_buff_pool[i]); |
@@ -506,9 +500,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, | |||
506 | { | 500 | { |
507 | int rc, try_again = 1; | 501 | int rc, try_again = 1; |
508 | 502 | ||
509 | /* After a kexec the adapter will still be open, so our attempt to | 503 | /* |
510 | * open it will fail. So if we get a failure we free the adapter and | 504 | * After a kexec the adapter will still be open, so our attempt to |
511 | * try again, but only once. */ | 505 | * open it will fail. So if we get a failure we free the adapter and |
506 | * try again, but only once. | ||
507 | */ | ||
512 | retry: | 508 | retry: |
513 | rc = h_register_logical_lan(adapter->vdev->unit_address, | 509 | rc = h_register_logical_lan(adapter->vdev->unit_address, |
514 | adapter->buffer_list_dma, rxq_desc.desc, | 510 | adapter->buffer_list_dma, rxq_desc.desc, |
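
[Note: assembled from this function's fragments, the kexec recovery reads roughly as below. The stale registration left by the previous kernel makes the first register attempt fail, so the logical LAN is freed (spinning while the hypervisor reports busy) and registration is retried exactly once (a sketch, not the verbatim body):

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
					union ibmveth_buf_desc rxq_desc,
					u64 mac_address)
{
	int rc, try_again = 1;

retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);
	if (rc != H_SUCCESS && try_again) {
		/* stale registration left over from the kexec'ed kernel */
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || rc == H_BUSY);

		try_again = 0;
		goto retry;
	}
	return rc;
}
]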
@@ -537,28 +533,31 @@ static int ibmveth_open(struct net_device *netdev) | |||
537 | int i; | 533 | int i; |
538 | struct device *dev; | 534 | struct device *dev; |
539 | 535 | ||
540 | ibmveth_debug_printk("open starting\n"); | 536 | netdev_dbg(netdev, "open starting\n"); |
541 | 537 | ||
542 | napi_enable(&adapter->napi); | 538 | napi_enable(&adapter->napi); |
543 | 539 | ||
544 | for(i = 0; i<IbmVethNumBufferPools; i++) | 540 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
545 | rxq_entries += adapter->rx_buff_pool[i].size; | 541 | rxq_entries += adapter->rx_buff_pool[i].size; |
546 | 542 | ||
547 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 543 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
548 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 544 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
549 | 545 | ||
550 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { | 546 | if (!adapter->buffer_list_addr || !adapter->filter_list_addr) { |
551 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); | 547 | netdev_err(netdev, "unable to allocate filter or buffer list " |
548 | "pages\n"); | ||
552 | ibmveth_cleanup(adapter); | 549 | ibmveth_cleanup(adapter); |
553 | napi_disable(&adapter->napi); | 550 | napi_disable(&adapter->napi); |
554 | return -ENOMEM; | 551 | return -ENOMEM; |
555 | } | 552 | } |
556 | 553 | ||
557 | adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; | 554 | adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * |
558 | adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL); | 555 | rxq_entries; |
556 | adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, | ||
557 | GFP_KERNEL); | ||
559 | 558 | ||
560 | if(!adapter->rx_queue.queue_addr) { | 559 | if (!adapter->rx_queue.queue_addr) { |
561 | ibmveth_error_printk("unable to allocate rx queue pages\n"); | 560 | netdev_err(netdev, "unable to allocate rx queue pages\n"); |
562 | ibmveth_cleanup(adapter); | 561 | ibmveth_cleanup(adapter); |
563 | napi_disable(&adapter->napi); | 562 | napi_disable(&adapter->napi); |
564 | return -ENOMEM; | 563 | return -ENOMEM; |
@@ -577,7 +576,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
577 | if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || | 576 | if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || |
578 | (dma_mapping_error(dev, adapter->filter_list_dma)) || | 577 | (dma_mapping_error(dev, adapter->filter_list_dma)) || |
579 | (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { | 578 | (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { |
580 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | 579 | netdev_err(netdev, "unable to map filter or buffer list " |
580 | "pages\n"); | ||
581 | ibmveth_cleanup(adapter); | 581 | ibmveth_cleanup(adapter); |
582 | napi_disable(&adapter->napi); | 582 | napi_disable(&adapter->napi); |
583 | return -ENOMEM; | 583 | return -ENOMEM; |
@@ -590,20 +590,23 @@ static int ibmveth_open(struct net_device *netdev) | |||
590 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); | 590 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); |
591 | mac_address = mac_address >> 16; | 591 | mac_address = mac_address >> 16; |
592 | 592 | ||
593 | rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; | 593 | rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | |
594 | adapter->rx_queue.queue_len; | ||
594 | rxq_desc.fields.address = adapter->rx_queue.queue_dma; | 595 | rxq_desc.fields.address = adapter->rx_queue.queue_dma; |
595 | 596 | ||
596 | ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr); | 597 | netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr); |
597 | ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); | 598 | netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr); |
598 | ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); | 599 | netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr); |
599 | 600 | ||
600 | h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); | 601 | h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); |
601 | 602 | ||
602 | lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); | 603 | lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); |
603 | 604 | ||
604 | if(lpar_rc != H_SUCCESS) { | 605 | if (lpar_rc != H_SUCCESS) { |
605 | ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); | 606 | netdev_err(netdev, "h_register_logical_lan failed with %ld\n", |
606 | ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n", | 607 | lpar_rc); |
608 | netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq " | ||
609 | "desc:0x%llx MAC:0x%llx\n", | ||
607 | adapter->buffer_list_dma, | 610 | adapter->buffer_list_dma, |
608 | adapter->filter_list_dma, | 611 | adapter->filter_list_dma, |
609 | rxq_desc.desc, | 612 | rxq_desc.desc, |
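
[Note: everything handed to the hypervisor here, the receive queue included, is described by the same 8-byte descriptor: a union whose fields view packs IBMVETH_BUF_* flag bits and a length into the high word and a TCE-mapped I/O address into the low word, and whose desc view is passed whole to the hcalls. Roughly (sketch; the real definition lives in ibmveth.h):

union ibmveth_buf_desc {
	u64 desc;                 /* passed whole to the hcalls   */
	struct {
		u32 flags_len;    /* IBMVETH_BUF_* flags | length */
		u32 address;      /* I/O address of the buffer    */
	} fields;
};
]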
@@ -613,11 +616,11 @@ static int ibmveth_open(struct net_device *netdev) | |||
613 | return -ENONET; | 616 | return -ENONET; |
614 | } | 617 | } |
615 | 618 | ||
616 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 619 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
617 | if(!adapter->rx_buff_pool[i].active) | 620 | if (!adapter->rx_buff_pool[i].active) |
618 | continue; | 621 | continue; |
619 | if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { | 622 | if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { |
620 | ibmveth_error_printk("unable to alloc pool\n"); | 623 | netdev_err(netdev, "unable to alloc pool\n"); |
621 | adapter->rx_buff_pool[i].active = 0; | 624 | adapter->rx_buff_pool[i].active = 0; |
622 | ibmveth_cleanup(adapter); | 625 | ibmveth_cleanup(adapter); |
623 | napi_disable(&adapter->napi); | 626 | napi_disable(&adapter->napi); |
@@ -625,9 +628,12 @@ static int ibmveth_open(struct net_device *netdev) | |||
625 | } | 628 | } |
626 | } | 629 | } |
627 | 630 | ||
628 | ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); | 631 | netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq); |
629 | if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) { | 632 | rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, |
630 | ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc); | 633 | netdev); |
634 | if (rc != 0) { | ||
635 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", | ||
636 | netdev->irq, rc); | ||
631 | do { | 637 | do { |
632 | rc = h_free_logical_lan(adapter->vdev->unit_address); | 638 | rc = h_free_logical_lan(adapter->vdev->unit_address); |
633 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 639 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); |
@@ -640,7 +646,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
640 | adapter->bounce_buffer = | 646 | adapter->bounce_buffer = |
641 | kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); | 647 | kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); |
642 | if (!adapter->bounce_buffer) { | 648 | if (!adapter->bounce_buffer) { |
643 | ibmveth_error_printk("unable to allocate bounce buffer\n"); | 649 | netdev_err(netdev, "unable to allocate bounce buffer\n"); |
644 | ibmveth_cleanup(adapter); | 650 | ibmveth_cleanup(adapter); |
645 | napi_disable(&adapter->napi); | 651 | napi_disable(&adapter->napi); |
646 | return -ENOMEM; | 652 | return -ENOMEM; |
@@ -649,18 +655,18 @@ static int ibmveth_open(struct net_device *netdev) | |||
649 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, | 655 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, |
650 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); | 656 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); |
651 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { | 657 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { |
652 | ibmveth_error_printk("unable to map bounce buffer\n"); | 658 | netdev_err(netdev, "unable to map bounce buffer\n"); |
653 | ibmveth_cleanup(adapter); | 659 | ibmveth_cleanup(adapter); |
654 | napi_disable(&adapter->napi); | 660 | napi_disable(&adapter->napi); |
655 | return -ENOMEM; | 661 | return -ENOMEM; |
656 | } | 662 | } |
657 | 663 | ||
658 | ibmveth_debug_printk("initial replenish cycle\n"); | 664 | netdev_dbg(netdev, "initial replenish cycle\n"); |
659 | ibmveth_interrupt(netdev->irq, netdev); | 665 | ibmveth_interrupt(netdev->irq, netdev); |
660 | 666 | ||
661 | netif_start_queue(netdev); | 667 | netif_start_queue(netdev); |
662 | 668 | ||
663 | ibmveth_debug_printk("open complete\n"); | 669 | netdev_dbg(netdev, "open complete\n"); |
664 | 670 | ||
665 | return 0; | 671 | return 0; |
666 | } | 672 | } |
@@ -670,7 +676,7 @@ static int ibmveth_close(struct net_device *netdev) | |||
670 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 676 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
671 | long lpar_rc; | 677 | long lpar_rc; |
672 | 678 | ||
673 | ibmveth_debug_printk("close starting\n"); | 679 | netdev_dbg(netdev, "close starting\n"); |
674 | 680 | ||
675 | napi_disable(&adapter->napi); | 681 | napi_disable(&adapter->napi); |
676 | 682 | ||
@@ -683,26 +689,29 @@ static int ibmveth_close(struct net_device *netdev) | |||
683 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); | 689 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
684 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); | 690 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
685 | 691 | ||
686 | if(lpar_rc != H_SUCCESS) | 692 | if (lpar_rc != H_SUCCESS) { |
687 | { | 693 | netdev_err(netdev, "h_free_logical_lan failed with %lx, " |
688 | ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n", | 694 | "continuing with close\n", lpar_rc); |
689 | lpar_rc); | ||
690 | } | 695 | } |
691 | 696 | ||
692 | free_irq(netdev->irq, netdev); | 697 | free_irq(netdev->irq, netdev); |
693 | 698 | ||
694 | adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); | 699 | adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + |
700 | 4096 - 8); | ||
695 | 701 | ||
696 | ibmveth_cleanup(adapter); | 702 | ibmveth_cleanup(adapter); |
697 | 703 | ||
698 | ibmveth_debug_printk("close complete\n"); | 704 | netdev_dbg(netdev, "close complete\n"); |
699 | 705 | ||
700 | return 0; | 706 | return 0; |
701 | } | 707 | } |
702 | 708 | ||
703 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { | 709 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
704 | cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); | 710 | { |
705 | cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); | 711 | cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | |
712 | SUPPORTED_FIBRE); | ||
713 | cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | | ||
714 | ADVERTISED_FIBRE); | ||
706 | cmd->speed = SPEED_1000; | 715 | cmd->speed = SPEED_1000; |
707 | cmd->duplex = DUPLEX_FULL; | 716 | cmd->duplex = DUPLEX_FULL; |
708 | cmd->port = PORT_FIBRE; | 717 | cmd->port = PORT_FIBRE; |
@@ -714,12 +723,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
714 | return 0; | 723 | return 0; |
715 | } | 724 | } |
716 | 725 | ||
717 | static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { | 726 | static void netdev_get_drvinfo(struct net_device *dev, |
727 | struct ethtool_drvinfo *info) | ||
728 | { | ||
718 | strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1); | 729 | strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1); |
719 | strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1); | 730 | strncpy(info->version, ibmveth_driver_version, |
731 | sizeof(info->version) - 1); | ||
720 | } | 732 | } |
721 | 733 | ||
722 | static u32 netdev_get_link(struct net_device *dev) { | 734 | static u32 netdev_get_link(struct net_device *dev) |
735 | { | ||
723 | return 1; | 736 | return 1; |
724 | } | 737 | } |
725 | 738 | ||
@@ -727,18 +740,20 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) | |||
727 | { | 740 | { |
728 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 741 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
729 | 742 | ||
730 | if (data) | 743 | if (data) { |
731 | adapter->rx_csum = 1; | 744 | adapter->rx_csum = 1; |
732 | else { | 745 | } else { |
733 | /* | 746 | /* |
734 | * Since the ibmveth firmware interface does not have the concept of | 747 | * Since the ibmveth firmware interface does not have the |
735 | * separate tx/rx checksum offload enable, if rx checksum is disabled | 748 | * concept of separate tx/rx checksum offload enable, if rx |
736 | * we also have to disable tx checksum offload. Once we disable rx | 749 | * checksum is disabled we also have to disable tx checksum |
737 | * checksum offload, we are no longer allowed to send tx buffers that | 750 | * offload. Once we disable rx checksum offload, we are no |
738 | * are not properly checksummed. | 751 | * longer allowed to send tx buffers that are not properly |
752 | * checksummed. | ||
739 | */ | 753 | */ |
740 | adapter->rx_csum = 0; | 754 | adapter->rx_csum = 0; |
741 | dev->features &= ~NETIF_F_IP_CSUM; | 755 | dev->features &= ~NETIF_F_IP_CSUM; |
756 | dev->features &= ~NETIF_F_IPV6_CSUM; | ||
742 | } | 757 | } |
743 | } | 758 | } |
744 | 759 | ||
@@ -747,10 +762,15 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) | |||
747 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 762 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
748 | 763 | ||
749 | if (data) { | 764 | if (data) { |
750 | dev->features |= NETIF_F_IP_CSUM; | 765 | if (adapter->fw_ipv4_csum_support) |
766 | dev->features |= NETIF_F_IP_CSUM; | ||
767 | if (adapter->fw_ipv6_csum_support) | ||
768 | dev->features |= NETIF_F_IPV6_CSUM; | ||
751 | adapter->rx_csum = 1; | 769 | adapter->rx_csum = 1; |
752 | } else | 770 | } else { |
753 | dev->features &= ~NETIF_F_IP_CSUM; | 771 | dev->features &= ~NETIF_F_IP_CSUM; |
772 | dev->features &= ~NETIF_F_IPV6_CSUM; | ||
773 | } | ||
754 | } | 774 | } |
755 | 775 | ||
756 | static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, | 776 | static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, |
@@ -758,7 +778,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, | |||
758 | { | 778 | { |
759 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 779 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
760 | unsigned long set_attr, clr_attr, ret_attr; | 780 | unsigned long set_attr, clr_attr, ret_attr; |
761 | long ret; | 781 | unsigned long set_attr6, clr_attr6; |
782 | long ret, ret6; | ||
762 | int rc1 = 0, rc2 = 0; | 783 | int rc1 = 0, rc2 = 0; |
763 | int restart = 0; | 784 | int restart = 0; |
764 | 785 | ||
@@ -772,10 +793,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, | |||
772 | set_attr = 0; | 793 | set_attr = 0; |
773 | clr_attr = 0; | 794 | clr_attr = 0; |
774 | 795 | ||
775 | if (data) | 796 | if (data) { |
776 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; | 797 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; |
777 | else | 798 | set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM; |
799 | } else { | ||
778 | clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; | 800 | clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; |
801 | clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM; | ||
802 | } | ||
779 | 803 | ||
780 | ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); | 804 | ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); |
781 | 805 | ||
@@ -786,18 +810,39 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, | |||
786 | set_attr, &ret_attr); | 810 | set_attr, &ret_attr); |
787 | 811 | ||
788 | if (ret != H_SUCCESS) { | 812 | if (ret != H_SUCCESS) { |
789 | rc1 = -EIO; | 813 | netdev_err(dev, "unable to change IPv4 checksum " |
790 | ibmveth_error_printk("unable to change checksum offload settings." | 814 | "offload settings. %d rc=%ld\n", |
791 | " %d rc=%ld\n", data, ret); | 815 | data, ret); |
792 | 816 | ||
793 | ret = h_illan_attributes(adapter->vdev->unit_address, | 817 | ret = h_illan_attributes(adapter->vdev->unit_address, |
794 | set_attr, clr_attr, &ret_attr); | 818 | set_attr, clr_attr, &ret_attr); |
819 | } else { | ||
820 | adapter->fw_ipv4_csum_support = data; | ||
821 | } | ||
822 | |||
823 | ret6 = h_illan_attributes(adapter->vdev->unit_address, | ||
824 | clr_attr6, set_attr6, &ret_attr); | ||
825 | |||
826 | if (ret6 != H_SUCCESS) { | ||
827 | netdev_err(dev, "unable to change IPv6 checksum " | ||
828 | "offload settings. %d rc=%ld\n", | ||
829 | data, ret); | ||
830 | |||
831 | ret = h_illan_attributes(adapter->vdev->unit_address, | ||
832 | set_attr6, clr_attr6, | ||
833 | &ret_attr); | ||
795 | } else | 834 | } else |
835 | adapter->fw_ipv6_csum_support = data; | ||
836 | |||
837 | if (ret == H_SUCCESS || ret6 == H_SUCCESS) | ||
796 | done(dev, data); | 838 | done(dev, data); |
839 | else | ||
840 | rc1 = -EIO; | ||
797 | } else { | 841 | } else { |
798 | rc1 = -EIO; | 842 | rc1 = -EIO; |
799 | ibmveth_error_printk("unable to change checksum offload settings." | 843 | netdev_err(dev, "unable to change checksum offload settings." |
800 | " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr); | 844 | " %d rc=%ld ret_attr=%lx\n", data, ret, |
845 | ret_attr); | ||
801 | } | 846 | } |
802 | 847 | ||
803 | if (restart) | 848 | if (restart) |
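
[Note: the h_illan_attributes() convention used throughout this hunk is (unit address, bits to clear, bits to set, returned attribute word): enabling an offload passes the bit in the set position, disabling passes it in the clear position, and the error paths retry with the masks swapped to restore the previous state. As shown, only one of set_attr6/clr_attr6 is assigned on each path, so a condensed template with both mask pairs zero-initialized is the safer shape (sketch):

unsigned long set_attr = 0, clr_attr = 0;
unsigned long set_attr6 = 0, clr_attr6 = 0;

if (data) {
	set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
} else {
	clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
}

/* an unused, zeroed mask pair is then a harmless no-op */
ret = h_illan_attributes(adapter->vdev->unit_address,
			 clr_attr, set_attr, &ret_attr);
ret6 = h_illan_attributes(adapter->vdev->unit_address,
			  clr_attr6, set_attr6, &ret_attr);
]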
@@ -821,13 +866,14 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data) | |||
821 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 866 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
822 | int rc = 0; | 867 | int rc = 0; |
823 | 868 | ||
824 | if (data && (dev->features & NETIF_F_IP_CSUM)) | 869 | if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) |
825 | return 0; | 870 | return 0; |
826 | if (!data && !(dev->features & NETIF_F_IP_CSUM)) | 871 | if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) |
827 | return 0; | 872 | return 0; |
828 | 873 | ||
829 | if (data && !adapter->rx_csum) | 874 | if (data && !adapter->rx_csum) |
830 | rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags); | 875 | rc = ibmveth_set_csum_offload(dev, data, |
876 | ibmveth_set_tx_csum_flags); | ||
831 | else | 877 | else |
832 | ibmveth_set_tx_csum_flags(dev, data); | 878 | ibmveth_set_tx_csum_flags(dev, data); |
833 | 879 | ||
@@ -881,6 +927,7 @@ static const struct ethtool_ops netdev_ethtool_ops = { | |||
881 | .get_strings = ibmveth_get_strings, | 927 | .get_strings = ibmveth_get_strings, |
882 | .get_sset_count = ibmveth_get_sset_count, | 928 | .get_sset_count = ibmveth_get_sset_count, |
883 | .get_ethtool_stats = ibmveth_get_ethtool_stats, | 929 | .get_ethtool_stats = ibmveth_get_ethtool_stats, |
930 | .set_sg = ethtool_op_set_sg, | ||
884 | }; | 931 | }; |
885 | 932 | ||
886 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 933 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
@@ -890,129 +937,216 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
890 | 937 | ||
891 | #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) | 938 | #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) |
892 | 939 | ||
893 | static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, | 940 | static int ibmveth_send(struct ibmveth_adapter *adapter, |
894 | struct net_device *netdev) | 941 | union ibmveth_buf_desc *descs) |
895 | { | 942 | { |
896 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | ||
897 | union ibmveth_buf_desc desc; | ||
898 | unsigned long lpar_rc; | ||
899 | unsigned long correlator; | 943 | unsigned long correlator; |
900 | unsigned long flags; | ||
901 | unsigned int retry_count; | 944 | unsigned int retry_count; |
902 | unsigned int tx_dropped = 0; | 945 | unsigned long ret; |
903 | unsigned int tx_bytes = 0; | 946 | |
904 | unsigned int tx_packets = 0; | 947 | /* |
905 | unsigned int tx_send_failed = 0; | 948 | * The retry count sets a maximum for the number of broadcast and |
906 | unsigned int tx_map_failed = 0; | 949 | * multicast destinations within the system. |
907 | int used_bounce = 0; | 950 | */ |
908 | unsigned long data_dma_addr; | 951 | retry_count = 1024; |
952 | correlator = 0; | ||
953 | do { | ||
954 | ret = h_send_logical_lan(adapter->vdev->unit_address, | ||
955 | descs[0].desc, descs[1].desc, | ||
956 | descs[2].desc, descs[3].desc, | ||
957 | descs[4].desc, descs[5].desc, | ||
958 | correlator, &correlator); | ||
959 | } while ((ret == H_BUSY) && (retry_count--)); | ||
960 | |||
961 | if (ret != H_SUCCESS && ret != H_DROPPED) { | ||
962 | netdev_err(adapter->netdev, "tx: h_send_logical_lan failed " | ||
963 | "with rc=%ld\n", ret); | ||
964 | return 1; | ||
965 | } | ||
966 | |||
967 | return 0; | ||
968 | } | ||
909 | 969 | ||
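
[Note: ibmveth_send() above fires all six descriptors in a single h_send_logical_lan() call, feeding the returned correlator back in whenever the hypervisor answers H_BUSY (per the comment, once per broadcast or multicast destination still to be served, hence the generous 1024 bound). H_DROPPED is success from the sender's side: the frame was accepted but had nowhere to go. How the callers below classify the result (sketch, hypothetical helper name):

/* nonzero return from ibmveth_send() means a real tx error;
 * H_SUCCESS and H_DROPPED both count as a completed transmit
 */
static bool ibmveth_tx_accepted(unsigned long ret)
{
	return ret == H_SUCCESS || ret == H_DROPPED;
}
]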
910 | desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; | 970 | static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, |
971 | struct net_device *netdev) | ||
972 | { | ||
973 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | ||
974 | unsigned int desc_flags; | ||
975 | union ibmveth_buf_desc descs[6]; | ||
976 | int last, i; | ||
977 | int force_bounce = 0; | ||
978 | |||
979 | /* | ||
980 | * veth handles a maximum of 6 segments including the header, so | ||
981 | * we have to linearize the skb if there are more than this. | ||
982 | */ | ||
983 | if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) { | ||
984 | netdev->stats.tx_dropped++; | ||
985 | goto out; | ||
986 | } | ||
911 | 987 | ||
988 | /* veth can't checksum offload UDP */ | ||
912 | if (skb->ip_summed == CHECKSUM_PARTIAL && | 989 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
913 | ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { | 990 | ((skb->protocol == htons(ETH_P_IP) && |
914 | ibmveth_error_printk("tx: failed to checksum packet\n"); | 991 | ip_hdr(skb)->protocol != IPPROTO_TCP) || |
915 | tx_dropped++; | 992 | (skb->protocol == htons(ETH_P_IPV6) && |
993 | ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) && | ||
994 | skb_checksum_help(skb)) { | ||
995 | |||
996 | netdev_err(netdev, "tx: failed to checksum packet\n"); | ||
997 | netdev->stats.tx_dropped++; | ||
916 | goto out; | 998 | goto out; |
917 | } | 999 | } |
918 | 1000 | ||
1001 | desc_flags = IBMVETH_BUF_VALID; | ||
1002 | |||
919 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1003 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
920 | unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; | 1004 | unsigned char *buf = skb_transport_header(skb) + |
1005 | skb->csum_offset; | ||
921 | 1006 | ||
922 | desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); | 1007 | desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); |
923 | 1008 | ||
924 | /* Need to zero out the checksum */ | 1009 | /* Need to zero out the checksum */ |
925 | buf[0] = 0; | 1010 | buf[0] = 0; |
926 | buf[1] = 0; | 1011 | buf[1] = 0; |
927 | } | 1012 | } |
928 | 1013 | ||
929 | data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | 1014 | retry_bounce: |
930 | skb->len, DMA_TO_DEVICE); | 1015 | memset(descs, 0, sizeof(descs)); |
931 | if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { | 1016 | |
932 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 1017 | /* |
933 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); | 1018 | * If a linear packet is below the rx threshold then |
1019 | * copy it into the static bounce buffer. This avoids the | ||
1020 | * cost of a TCE insert and remove. | ||
1021 | */ | ||
1022 | if (force_bounce || (!skb_is_nonlinear(skb) && | ||
1023 | (skb->len < tx_copybreak))) { | ||
934 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, | 1024 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, |
935 | skb->len); | 1025 | skb->len); |
936 | desc.fields.address = adapter->bounce_buffer_dma; | 1026 | |
937 | tx_map_failed++; | 1027 | descs[0].fields.flags_len = desc_flags | skb->len; |
938 | used_bounce = 1; | 1028 | descs[0].fields.address = adapter->bounce_buffer_dma; |
939 | wmb(); | 1029 | |
940 | } else | 1030 | if (ibmveth_send(adapter, descs)) { |
941 | desc.fields.address = data_dma_addr; | 1031 | adapter->tx_send_failed++; |
942 | 1032 | netdev->stats.tx_dropped++; | |
943 | /* send the frame. Arbitrarily set retrycount to 1024 */ | 1033 | } else { |
944 | correlator = 0; | 1034 | netdev->stats.tx_packets++; |
945 | retry_count = 1024; | 1035 | netdev->stats.tx_bytes += skb->len; |
946 | do { | 1036 | } |
947 | lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, | 1037 | |
948 | desc.desc, 0, 0, 0, 0, 0, | 1038 | goto out; |
949 | correlator, &correlator); | 1039 | } |
950 | } while ((lpar_rc == H_BUSY) && (retry_count--)); | 1040 | |
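retry_bounce is the heart of the new transmit path: a linear skb shorter than the tx_copybreak threshold is memcpy'd into the preallocated bounce buffer, trading one copy for a TCE insert and remove on the IOMMU. A toy model of the decision (the default threshold here is a stand-in, not necessarily the module parameter's default):

/* Model of the tx copybreak decision used above. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int tx_copybreak = 128;  /* bytes; illustrative default */

static bool use_bounce_buffer(bool force, bool nonlinear, unsigned int len)
{
        return force || (!nonlinear && len < tx_copybreak);
}

int main(void)
{
        printf("64B linear  -> bounce? %d\n", use_bounce_buffer(false, false, 64));
        printf("9KB frags   -> bounce? %d\n", use_bounce_buffer(false, true, 9000));
        printf("map failure -> bounce? %d\n", use_bounce_buffer(true, true, 9000));
        return 0;
}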
951 | 1041 | /* Map the header */ | |
952 | if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { | 1042 | descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, |
953 | ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); | 1043 | skb_headlen(skb), |
954 | ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", | 1044 | DMA_TO_DEVICE); |
955 | (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0, | 1045 | if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) |
956 | skb->len, desc.fields.address); | 1046 | goto map_failed; |
957 | tx_send_failed++; | 1047 | |
958 | tx_dropped++; | 1048 | descs[0].fields.flags_len = desc_flags | skb_headlen(skb); |
959 | } else { | 1049 | |
960 | tx_packets++; | 1050 | /* Map the frags */ |
961 | tx_bytes += skb->len; | 1051 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
962 | netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ | 1052 | unsigned long dma_addr; |
1053 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
1054 | |||
1055 | dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, | ||
1056 | frag->page_offset, frag->size, | ||
1057 | DMA_TO_DEVICE); | ||
1058 | |||
1059 | if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) | ||
1060 | goto map_failed_frags; | ||
1061 | |||
1062 | descs[i+1].fields.flags_len = desc_flags | frag->size; | ||
1063 | descs[i+1].fields.address = dma_addr; | ||
963 | } | 1064 | } |
964 | 1065 | ||
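Each ibmveth_buf_desc packs its flag bits and buffer length into a single flags_len word, which is why the unmap loops below can recover the mapped length with IBMVETH_BUF_LEN_MASK. A runnable sketch of that packing (the bit positions are assumptions inferred from how the mask is used here, not the header's actual constants):

/* Model of the descriptor word: flags in the high bits, length low. */
#include <stdio.h>
#include <stdint.h>

#define BUF_VALID     0x80000000u   /* assumed flag position */
#define BUF_LEN_MASK  0x00FFFFFFu   /* assumed 24-bit length field */

int main(void)
{
        uint32_t flags_len = BUF_VALID | 1514;        /* pack */
        uint32_t len = flags_len & BUF_LEN_MASK;      /* unpack for unmap */
        printf("flags_len=0x%08x len=%u valid=%u\n",
               flags_len, len, !!(flags_len & BUF_VALID));
        return 0;
}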
965 | if (!used_bounce) | 1066 | if (ibmveth_send(adapter, descs)) { |
966 | dma_unmap_single(&adapter->vdev->dev, data_dma_addr, | 1067 | adapter->tx_send_failed++; |
967 | skb->len, DMA_TO_DEVICE); | 1068 | netdev->stats.tx_dropped++; |
1069 | } else { | ||
1070 | netdev->stats.tx_packets++; | ||
1071 | netdev->stats.tx_bytes += skb->len; | ||
1072 | } | ||
968 | 1073 | ||
969 | out: spin_lock_irqsave(&adapter->stats_lock, flags); | 1074 | for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) |
970 | netdev->stats.tx_dropped += tx_dropped; | 1075 | dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, |
971 | netdev->stats.tx_bytes += tx_bytes; | 1076 | descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, |
972 | netdev->stats.tx_packets += tx_packets; | 1077 | DMA_TO_DEVICE); |
973 | adapter->tx_send_failed += tx_send_failed; | ||
974 | adapter->tx_map_failed += tx_map_failed; | ||
975 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | ||
976 | 1078 | ||
1079 | out: | ||
977 | dev_kfree_skb(skb); | 1080 | dev_kfree_skb(skb); |
978 | return NETDEV_TX_OK; | 1081 | return NETDEV_TX_OK; |
1082 | |||
1083 | map_failed_frags: | ||
1084 | last = i+1; | ||
1085 | for (i = 0; i < last; i++) | ||
1086 | dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, | ||
1087 | descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, | ||
1088 | DMA_TO_DEVICE); | ||
1089 | |||
1090 | map_failed: | ||
1091 | if (!firmware_has_feature(FW_FEATURE_CMO)) | ||
1092 | netdev_err(netdev, "tx: unable to map xmit buffer\n"); | ||
1093 | adapter->tx_map_failed++; | ||
1094 | skb_linearize(skb); | ||
1095 | force_bounce = 1; | ||
1096 | goto retry_bounce; | ||
979 | } | 1097 | } |
980 | 1098 | ||
981 | static int ibmveth_poll(struct napi_struct *napi, int budget) | 1099 | static int ibmveth_poll(struct napi_struct *napi, int budget) |
982 | { | 1100 | { |
983 | struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); | 1101 | struct ibmveth_adapter *adapter = |
1102 | container_of(napi, struct ibmveth_adapter, napi); | ||
984 | struct net_device *netdev = adapter->netdev; | 1103 | struct net_device *netdev = adapter->netdev; |
985 | int frames_processed = 0; | 1104 | int frames_processed = 0; |
986 | unsigned long lpar_rc; | 1105 | unsigned long lpar_rc; |
987 | 1106 | ||
988 | restart_poll: | 1107 | restart_poll: |
989 | do { | 1108 | do { |
990 | struct sk_buff *skb; | ||
991 | |||
992 | if (!ibmveth_rxq_pending_buffer(adapter)) | 1109 | if (!ibmveth_rxq_pending_buffer(adapter)) |
993 | break; | 1110 | break; |
994 | 1111 | ||
995 | rmb(); | 1112 | smp_rmb(); |
996 | if (!ibmveth_rxq_buffer_valid(adapter)) { | 1113 | if (!ibmveth_rxq_buffer_valid(adapter)) { |
997 | wmb(); /* suggested by larson1 */ | 1114 | wmb(); /* suggested by larson1 */ |
998 | adapter->rx_invalid_buffer++; | 1115 | adapter->rx_invalid_buffer++; |
999 | ibmveth_debug_printk("recycling invalid buffer\n"); | 1116 | netdev_dbg(netdev, "recycling invalid buffer\n"); |
1000 | ibmveth_rxq_recycle_buffer(adapter); | 1117 | ibmveth_rxq_recycle_buffer(adapter); |
1001 | } else { | 1118 | } else { |
1119 | struct sk_buff *skb, *new_skb; | ||
1002 | int length = ibmveth_rxq_frame_length(adapter); | 1120 | int length = ibmveth_rxq_frame_length(adapter); |
1003 | int offset = ibmveth_rxq_frame_offset(adapter); | 1121 | int offset = ibmveth_rxq_frame_offset(adapter); |
1004 | int csum_good = ibmveth_rxq_csum_good(adapter); | 1122 | int csum_good = ibmveth_rxq_csum_good(adapter); |
1005 | 1123 | ||
1006 | skb = ibmveth_rxq_get_buffer(adapter); | 1124 | skb = ibmveth_rxq_get_buffer(adapter); |
1007 | if (csum_good) | ||
1008 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1009 | 1125 | ||
1010 | ibmveth_rxq_harvest_buffer(adapter); | 1126 | new_skb = NULL; |
1127 | if (length < rx_copybreak) | ||
1128 | new_skb = netdev_alloc_skb(netdev, length); | ||
1129 | |||
1130 | if (new_skb) { | ||
1131 | skb_copy_to_linear_data(new_skb, | ||
1132 | skb->data + offset, | ||
1133 | length); | ||
1134 | if (rx_flush) | ||
1135 | ibmveth_flush_buffer(skb->data, | ||
1136 | length + offset); | ||
1137 | skb = new_skb; | ||
1138 | ibmveth_rxq_recycle_buffer(adapter); | ||
1139 | } else { | ||
1140 | ibmveth_rxq_harvest_buffer(adapter); | ||
1141 | skb_reserve(skb, offset); | ||
1142 | } | ||
1011 | 1143 | ||
1012 | skb_reserve(skb, offset); | ||
1013 | skb_put(skb, length); | 1144 | skb_put(skb, length); |
1014 | skb->protocol = eth_type_trans(skb, netdev); | 1145 | skb->protocol = eth_type_trans(skb, netdev); |
1015 | 1146 | ||
1147 | if (csum_good) | ||
1148 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1149 | |||
1016 | netif_receive_skb(skb); /* send it up */ | 1150 | netif_receive_skb(skb); /* send it up */ |
1017 | 1151 | ||
1018 | netdev->stats.rx_packets++; | 1152 | netdev->stats.rx_packets++; |
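The receive side gains the mirror-image optimization: frames shorter than rx_copybreak are copied into a right-sized skb so the large pool buffer can be recycled immediately, while bigger frames surrender the pool buffer (harvest) and merely skb_reserve() past the offset. A toy model of the choice, including the fallback when the small allocation fails (the threshold default is illustrative):

/* Model of the rx copybreak path selection. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int rx_copybreak = 128;  /* bytes; illustrative default */

static const char *rx_path(unsigned int length, bool alloc_ok)
{
        if (length < rx_copybreak && alloc_ok)
                return "copy + recycle buffer";
        return "harvest buffer + skb_reserve(offset)";
}

int main(void)
{
        printf("  64B frame: %s\n", rx_path(64, true));
        printf("1500B frame: %s\n", rx_path(1500, true));
        printf("  64B, alloc failed: %s\n", rx_path(64, false));
        return 0;
}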
@@ -1030,7 +1164,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) | |||
1030 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1164 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
1031 | VIO_IRQ_ENABLE); | 1165 | VIO_IRQ_ENABLE); |
1032 | 1166 | ||
1033 | ibmveth_assert(lpar_rc == H_SUCCESS); | 1167 | BUG_ON(lpar_rc != H_SUCCESS); |
1034 | 1168 | ||
1035 | napi_complete(napi); | 1169 | napi_complete(napi); |
1036 | 1170 | ||
@@ -1054,7 +1188,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | |||
1054 | if (napi_schedule_prep(&adapter->napi)) { | 1188 | if (napi_schedule_prep(&adapter->napi)) { |
1055 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1189 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
1056 | VIO_IRQ_DISABLE); | 1190 | VIO_IRQ_DISABLE); |
1057 | ibmveth_assert(lpar_rc == H_SUCCESS); | 1191 | BUG_ON(lpar_rc != H_SUCCESS); |
1058 | __napi_schedule(&adapter->napi); | 1192 | __napi_schedule(&adapter->napi); |
1059 | } | 1193 | } |
1060 | return IRQ_HANDLED; | 1194 | return IRQ_HANDLED; |
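The interrupt handler and ibmveth_poll() follow the standard NAPI handshake: the IRQ path disables further VIO signals and schedules the poller, and the poller re-enables them only once it drains under budget, then re-checks for frames that raced in. A self-contained toy model of that handshake (the comments map states to the driver calls; the state machine itself is only an illustration):

/* Toy model of the NAPI disable/poll/re-enable handshake. */
#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled = true;
static bool poll_scheduled;
static int pending_frames = 3;

static void interrupt(void)
{
        if (!poll_scheduled) {          /* napi_schedule_prep() */
                irq_enabled = false;    /* h_vio_signal(..., VIO_IRQ_DISABLE) */
                poll_scheduled = true;  /* __napi_schedule() */
        }
}

static void poll(int budget)
{
        int done = 0;

        while (pending_frames && done < budget) {
                pending_frames--;       /* receive one frame */
                done++;
        }
        if (done < budget) {
                irq_enabled = true;     /* h_vio_signal(..., VIO_IRQ_ENABLE) */
                poll_scheduled = false; /* napi_complete() */
                /* a frame may have landed just before enable: re-poll */
                if (pending_frames)
                        interrupt();
        }
}

int main(void)
{
        interrupt();
        poll(16);
        printf("pending=%d irq_enabled=%d scheduled=%d\n",
               pending_frames, irq_enabled, poll_scheduled);
        return 0;
}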
@@ -1071,8 +1205,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
1071 | IbmVethMcastEnableRecv | | 1205 | IbmVethMcastEnableRecv | |
1072 | IbmVethMcastDisableFiltering, | 1206 | IbmVethMcastDisableFiltering, |
1073 | 0); | 1207 | 0); |
1074 | if(lpar_rc != H_SUCCESS) { | 1208 | if (lpar_rc != H_SUCCESS) { |
1075 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); | 1209 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1210 | "entering promisc mode\n", lpar_rc); | ||
1076 | } | 1211 | } |
1077 | } else { | 1212 | } else { |
1078 | struct netdev_hw_addr *ha; | 1213 | struct netdev_hw_addr *ha; |
@@ -1082,19 +1217,23 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
1082 | IbmVethMcastDisableFiltering | | 1217 | IbmVethMcastDisableFiltering | |
1083 | IbmVethMcastClearFilterTable, | 1218 | IbmVethMcastClearFilterTable, |
1084 | 0); | 1219 | 0); |
1085 | if(lpar_rc != H_SUCCESS) { | 1220 | if (lpar_rc != H_SUCCESS) { |
1086 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); | 1221 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1222 | "attempting to clear filter table\n", | ||
1223 | lpar_rc); | ||
1087 | } | 1224 | } |
1088 | /* add the addresses to the filter table */ | 1225 | /* add the addresses to the filter table */ |
1089 | netdev_for_each_mc_addr(ha, netdev) { | 1226 | netdev_for_each_mc_addr(ha, netdev) { |
1090 | // add the multicast address to the filter table | 1227 | /* add the multicast address to the filter table */ |
1091 | unsigned long mcast_addr = 0; | 1228 | unsigned long mcast_addr = 0; |
1092 | memcpy(((char *)&mcast_addr)+2, ha->addr, 6); | 1229 | memcpy(((char *)&mcast_addr)+2, ha->addr, 6); |
1093 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | 1230 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1094 | IbmVethMcastAddFilter, | 1231 | IbmVethMcastAddFilter, |
1095 | mcast_addr); | 1232 | mcast_addr); |
1096 | if(lpar_rc != H_SUCCESS) { | 1233 | if (lpar_rc != H_SUCCESS) { |
1097 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); | 1234 | netdev_err(netdev, "h_multicast_ctrl rc=%ld " |
1235 | "when adding an entry to the filter " | ||
1236 | "table\n", lpar_rc); | ||
1098 | } | 1237 | } |
1099 | } | 1238 | } |
1100 | 1239 | ||
@@ -1102,8 +1241,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
1102 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | 1241 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1103 | IbmVethMcastEnableFiltering, | 1242 | IbmVethMcastEnableFiltering, |
1104 | 0); | 1243 | 0); |
1105 | if(lpar_rc != H_SUCCESS) { | 1244 | if (lpar_rc != H_SUCCESS) { |
1106 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc); | 1245 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1246 | "enabling filtering\n", lpar_rc); | ||
1107 | } | 1247 | } |
1108 | } | 1248 | } |
1109 | } | 1249 | } |
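h_multicast_ctrl() takes each 6-byte multicast MAC right-aligned in an 8-byte argument, which the driver builds with memcpy(((char *)&mcast_addr)+2, ...) — correct on big-endian POWER. A portable demo that constructs the same value by shifting (the sample address is arbitrary):

/* Build the 8-byte filter argument from a 6-byte MAC. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        uint64_t mcast = 0;
        int i;

        /* equivalent of memcpy(((char *)&mcast)+2, mac, 6) on big-endian */
        for (i = 0; i < 6; i++)
                mcast = (mcast << 8) | mac[i];

        printf("filter arg = 0x%012llx\n", (unsigned long long)mcast);
        return 0;
}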
@@ -1116,14 +1256,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1116 | int i, rc; | 1256 | int i, rc; |
1117 | int need_restart = 0; | 1257 | int need_restart = 0; |
1118 | 1258 | ||
1119 | if (new_mtu < IBMVETH_MAX_MTU) | 1259 | if (new_mtu < IBMVETH_MIN_MTU) |
1120 | return -EINVAL; | 1260 | return -EINVAL; |
1121 | 1261 | ||
1122 | for (i = 0; i < IbmVethNumBufferPools; i++) | 1262 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
1123 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) | 1263 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) |
1124 | break; | 1264 | break; |
1125 | 1265 | ||
1126 | if (i == IbmVethNumBufferPools) | 1266 | if (i == IBMVETH_NUM_BUFF_POOLS) |
1127 | return -EINVAL; | 1267 | return -EINVAL; |
1128 | 1268 | ||
1129 | /* Deactivate all the buffer pools so that the next loop can activate | 1269 | /* Deactivate all the buffer pools so that the next loop can activate |
@@ -1136,7 +1276,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1136 | } | 1276 | } |
1137 | 1277 | ||
1138 | /* Look for an active buffer pool that can hold the new MTU */ | 1278 | /* Look for an active buffer pool that can hold the new MTU */ |
1139 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 1279 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1140 | adapter->rx_buff_pool[i].active = 1; | 1280 | adapter->rx_buff_pool[i].active = 1; |
1141 | 1281 | ||
1142 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | 1282 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { |
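The MTU change logic computes new_mtu_oh (the MTU plus buffer overhead) and walks the pools for the first one whose buff_size can hold it, returning -EINVAL if none fits. A compact model of that search (the pool sizes and overhead constant are hypothetical, not the driver's tables):

/* Model of the MTU-to-pool fit check in ibmveth_change_mtu(). */
#include <stdio.h>

int main(void)
{
        const int buff_size[] = { 512, 2048, 16384, 32768, 65536 }; /* hypothetical */
        const int npools = 5;
        int new_mtu = 9000, overhead = 22;      /* overhead value assumed */
        int new_mtu_oh = new_mtu + overhead;
        int i;

        for (i = 0; i < npools; i++)
                if (new_mtu_oh < buff_size[i])
                        break;

        if (i == npools)
                printf("MTU %d: no pool fits -> -EINVAL\n", new_mtu);
        else
                printf("MTU %d: fits pool %d (%d bytes)\n", new_mtu, i, buff_size[i]);
        return 0;
}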
@@ -1190,7 +1330,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |||
1190 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; | 1330 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; |
1191 | ret += IOMMU_PAGE_ALIGN(netdev->mtu); | 1331 | ret += IOMMU_PAGE_ALIGN(netdev->mtu); |
1192 | 1332 | ||
1193 | for (i = 0; i < IbmVethNumBufferPools; i++) { | 1333 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1194 | /* add the size of the active receive buffers */ | 1334 | /* add the size of the active receive buffers */ |
1195 | if (adapter->rx_buff_pool[i].active) | 1335 | if (adapter->rx_buff_pool[i].active) |
1196 | ret += | 1336 | ret += |
@@ -1219,41 +1359,36 @@ static const struct net_device_ops ibmveth_netdev_ops = { | |||
1219 | #endif | 1359 | #endif |
1220 | }; | 1360 | }; |
1221 | 1361 | ||
1222 | static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | 1362 | static int __devinit ibmveth_probe(struct vio_dev *dev, |
1363 | const struct vio_device_id *id) | ||
1223 | { | 1364 | { |
1224 | int rc, i; | 1365 | int rc, i; |
1225 | long ret; | ||
1226 | struct net_device *netdev; | 1366 | struct net_device *netdev; |
1227 | struct ibmveth_adapter *adapter; | 1367 | struct ibmveth_adapter *adapter; |
1228 | unsigned long set_attr, ret_attr; | ||
1229 | |||
1230 | unsigned char *mac_addr_p; | 1368 | unsigned char *mac_addr_p; |
1231 | unsigned int *mcastFilterSize_p; | 1369 | unsigned int *mcastFilterSize_p; |
1232 | 1370 | ||
1371 | dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", | ||
1372 | dev->unit_address); | ||
1233 | 1373 | ||
1234 | ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", | 1374 | mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, |
1235 | dev->unit_address); | 1375 | NULL); |
1236 | 1376 | if (!mac_addr_p) { | |
1237 | mac_addr_p = (unsigned char *) vio_get_attribute(dev, | 1377 | dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); |
1238 | VETH_MAC_ADDR, NULL); | 1378 | return -EINVAL; |
1239 | if(!mac_addr_p) { | ||
1240 | printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR " | ||
1241 | "attribute\n", __FILE__, __LINE__); | ||
1242 | return 0; | ||
1243 | } | 1379 | } |
1244 | 1380 | ||
1245 | mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, | 1381 | mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, |
1246 | VETH_MCAST_FILTER_SIZE, NULL); | 1382 | VETH_MCAST_FILTER_SIZE, NULL); |
1247 | if(!mcastFilterSize_p) { | 1383 | if (!mcastFilterSize_p) { |
1248 | printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " | 1384 | dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " |
1249 | "VETH_MCAST_FILTER_SIZE attribute\n", | 1385 | "attribute\n"); |
1250 | __FILE__, __LINE__); | 1386 | return -EINVAL; |
1251 | return 0; | ||
1252 | } | 1387 | } |
1253 | 1388 | ||
1254 | netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); | 1389 | netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); |
1255 | 1390 | ||
1256 | if(!netdev) | 1391 | if (!netdev) |
1257 | return -ENOMEM; | 1392 | return -ENOMEM; |
1258 | 1393 | ||
1259 | adapter = netdev_priv(netdev); | 1394 | adapter = netdev_priv(netdev); |
@@ -1261,19 +1396,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1261 | 1396 | ||
1262 | adapter->vdev = dev; | 1397 | adapter->vdev = dev; |
1263 | adapter->netdev = netdev; | 1398 | adapter->netdev = netdev; |
1264 | adapter->mcastFilterSize= *mcastFilterSize_p; | 1399 | adapter->mcastFilterSize = *mcastFilterSize_p; |
1265 | adapter->pool_config = 0; | 1400 | adapter->pool_config = 0; |
1266 | 1401 | ||
1267 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); | 1402 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); |
1268 | 1403 | ||
1269 | /* Some older boxes running PHYP non-natively have an OF that | 1404 | /* |
1270 | returns an 8-byte local-mac-address field (and the first | 1405 | * Some older boxes running PHYP non-natively have an OF that returns
1271 | 2 bytes have to be ignored) while newer boxes' OF return | 1406 | * an 8-byte local-mac-address field (and the first 2 bytes have to be
1272 | a 6-byte field. Note that IEEE 1275 specifies that | 1407 | * ignored) while newer boxes' OF return a 6-byte field. Note that |
1273 | local-mac-address must be a 6-byte field. | 1408 | * IEEE 1275 specifies that local-mac-address must be a 6-byte field. |
1274 | The RPA doc specifies that the first byte must be 10b, so | 1409 | * The RPA doc specifies that the first byte must be 10b, so we'll |
1275 | we'll just look for it to solve this 8 vs. 6 byte field issue */ | 1410 | * just look for it to solve this 8 vs. 6 byte field issue |
1276 | 1411 | */ | |
1277 | if ((*mac_addr_p & 0x3) != 0x02) | 1412 | if ((*mac_addr_p & 0x3) != 0x02) |
1278 | mac_addr_p += 2; | 1413 | mac_addr_p += 2; |
1279 | 1414 | ||
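The comment block above documents the 8-versus-6-byte local-mac-address quirk: per the RPA, the first byte of a real MAC here must carry the 10b pattern in its low two bits, so when that pattern is absent the property is the older padded form and the address starts two bytes in. A runnable sketch of the adjustment (the sample property bytes are made up):

/* Locate the real MAC inside an OF local-mac-address property. */
#include <stdio.h>
#include <stdint.h>

static const uint8_t *real_mac(const uint8_t *p)
{
        if ((p[0] & 0x3) != 0x02)
                p += 2;         /* older 8-byte form: skip the padding */
        return p;
}

int main(void)
{
        const uint8_t of_prop[8] = { 0x00, 0x00, 0x4e, 0x8f, 0x12, 0x34, 0x56, 0x78 };
        const uint8_t *mac = real_mac(of_prop);
        printf("MAC starts at offset %ld\n", (long)(mac - of_prop));
        return 0;
}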
@@ -1284,12 +1419,11 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1284 | netdev->netdev_ops = &ibmveth_netdev_ops; | 1419 | netdev->netdev_ops = &ibmveth_netdev_ops; |
1285 | netdev->ethtool_ops = &netdev_ethtool_ops; | 1420 | netdev->ethtool_ops = &netdev_ethtool_ops; |
1286 | SET_NETDEV_DEV(netdev, &dev->dev); | 1421 | SET_NETDEV_DEV(netdev, &dev->dev); |
1287 | netdev->features |= NETIF_F_LLTX; | 1422 | netdev->features |= NETIF_F_SG; |
1288 | spin_lock_init(&adapter->stats_lock); | ||
1289 | 1423 | ||
1290 | memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); | 1424 | memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); |
1291 | 1425 | ||
1292 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 1426 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1293 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; | 1427 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
1294 | int error; | 1428 | int error; |
1295 | 1429 | ||
@@ -1302,41 +1436,25 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1302 | kobject_uevent(kobj, KOBJ_ADD); | 1436 | kobject_uevent(kobj, KOBJ_ADD); |
1303 | } | 1437 | } |
1304 | 1438 | ||
1305 | ibmveth_debug_printk("adapter @ 0x%p\n", adapter); | 1439 | netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); |
1306 | 1440 | ||
1307 | adapter->buffer_list_dma = DMA_ERROR_CODE; | 1441 | adapter->buffer_list_dma = DMA_ERROR_CODE; |
1308 | adapter->filter_list_dma = DMA_ERROR_CODE; | 1442 | adapter->filter_list_dma = DMA_ERROR_CODE; |
1309 | adapter->rx_queue.queue_dma = DMA_ERROR_CODE; | 1443 | adapter->rx_queue.queue_dma = DMA_ERROR_CODE; |
1310 | 1444 | ||
1311 | ibmveth_debug_printk("registering netdev...\n"); | 1445 | netdev_dbg(netdev, "registering netdev...\n"); |
1312 | |||
1313 | ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr); | ||
1314 | |||
1315 | if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && | ||
1316 | !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && | ||
1317 | (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { | ||
1318 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; | ||
1319 | |||
1320 | ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr); | ||
1321 | 1446 | ||
1322 | if (ret == H_SUCCESS) { | 1447 | ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags); |
1323 | adapter->rx_csum = 1; | ||
1324 | netdev->features |= NETIF_F_IP_CSUM; | ||
1325 | } else | ||
1326 | ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr); | ||
1327 | } | ||
1328 | 1448 | ||
1329 | rc = register_netdev(netdev); | 1449 | rc = register_netdev(netdev); |
1330 | 1450 | ||
1331 | if(rc) { | 1451 | if (rc) { |
1332 | ibmveth_debug_printk("failed to register netdev rc=%d\n", rc); | 1452 | netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc); |
1333 | free_netdev(netdev); | 1453 | free_netdev(netdev); |
1334 | return rc; | 1454 | return rc; |
1335 | } | 1455 | } |
1336 | 1456 | ||
1337 | ibmveth_debug_printk("registered\n"); | 1457 | netdev_dbg(netdev, "registered\n"); |
1338 | |||
1339 | ibmveth_proc_register_adapter(adapter); | ||
1340 | 1458 | ||
1341 | return 0; | 1459 | return 0; |
1342 | } | 1460 | } |
@@ -1347,114 +1465,23 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) | |||
1347 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 1465 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1348 | int i; | 1466 | int i; |
1349 | 1467 | ||
1350 | for(i = 0; i<IbmVethNumBufferPools; i++) | 1468 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
1351 | kobject_put(&adapter->rx_buff_pool[i].kobj); | 1469 | kobject_put(&adapter->rx_buff_pool[i].kobj); |
1352 | 1470 | ||
1353 | unregister_netdev(netdev); | 1471 | unregister_netdev(netdev); |
1354 | 1472 | ||
1355 | ibmveth_proc_unregister_adapter(adapter); | ||
1356 | |||
1357 | free_netdev(netdev); | 1473 | free_netdev(netdev); |
1358 | dev_set_drvdata(&dev->dev, NULL); | 1474 | dev_set_drvdata(&dev->dev, NULL); |
1359 | 1475 | ||
1360 | return 0; | 1476 | return 0; |
1361 | } | 1477 | } |
1362 | 1478 | ||
1363 | #ifdef CONFIG_PROC_FS | ||
1364 | static void ibmveth_proc_register_driver(void) | ||
1365 | { | ||
1366 | ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net); | ||
1367 | if (ibmveth_proc_dir) { | ||
1368 | } | ||
1369 | } | ||
1370 | |||
1371 | static void ibmveth_proc_unregister_driver(void) | ||
1372 | { | ||
1373 | remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net); | ||
1374 | } | ||
1375 | |||
1376 | static int ibmveth_show(struct seq_file *seq, void *v) | ||
1377 | { | ||
1378 | struct ibmveth_adapter *adapter = seq->private; | ||
1379 | char *current_mac = (char *) adapter->netdev->dev_addr; | ||
1380 | char *firmware_mac = (char *) &adapter->mac_addr; | ||
1381 | |||
1382 | seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); | ||
1383 | |||
1384 | seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); | ||
1385 | seq_printf(seq, "Current MAC: %pM\n", current_mac); | ||
1386 | seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac); | ||
1387 | |||
1388 | seq_printf(seq, "\nAdapter Statistics:\n"); | ||
1389 | seq_printf(seq, " TX: vio_map_single failures: %lld\n", adapter->tx_map_failed); | ||
1390 | seq_printf(seq, " send failures: %lld\n", adapter->tx_send_failed); | ||
1391 | seq_printf(seq, " RX: replenish task cycles: %lld\n", adapter->replenish_task_cycles); | ||
1392 | seq_printf(seq, " alloc_skb_failures: %lld\n", adapter->replenish_no_mem); | ||
1393 | seq_printf(seq, " add buffer failures: %lld\n", adapter->replenish_add_buff_failure); | ||
1394 | seq_printf(seq, " invalid buffers: %lld\n", adapter->rx_invalid_buffer); | ||
1395 | seq_printf(seq, " no buffers: %lld\n", adapter->rx_no_buffer); | ||
1396 | |||
1397 | return 0; | ||
1398 | } | ||
1399 | |||
1400 | static int ibmveth_proc_open(struct inode *inode, struct file *file) | ||
1401 | { | ||
1402 | return single_open(file, ibmveth_show, PDE(inode)->data); | ||
1403 | } | ||
1404 | |||
1405 | static const struct file_operations ibmveth_proc_fops = { | ||
1406 | .owner = THIS_MODULE, | ||
1407 | .open = ibmveth_proc_open, | ||
1408 | .read = seq_read, | ||
1409 | .llseek = seq_lseek, | ||
1410 | .release = single_release, | ||
1411 | }; | ||
1412 | |||
1413 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | ||
1414 | { | ||
1415 | struct proc_dir_entry *entry; | ||
1416 | if (ibmveth_proc_dir) { | ||
1417 | char u_addr[10]; | ||
1418 | sprintf(u_addr, "%x", adapter->vdev->unit_address); | ||
1419 | entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir, | ||
1420 | &ibmveth_proc_fops, adapter); | ||
1421 | if (!entry) | ||
1422 | ibmveth_error_printk("Cannot create adapter proc entry"); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) | ||
1427 | { | ||
1428 | if (ibmveth_proc_dir) { | ||
1429 | char u_addr[10]; | ||
1430 | sprintf(u_addr, "%x", adapter->vdev->unit_address); | ||
1431 | remove_proc_entry(u_addr, ibmveth_proc_dir); | ||
1432 | } | ||
1433 | } | ||
1434 | |||
1435 | #else /* CONFIG_PROC_FS */ | ||
1436 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | ||
1437 | { | ||
1438 | } | ||
1439 | |||
1440 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) | ||
1441 | { | ||
1442 | } | ||
1443 | static void ibmveth_proc_register_driver(void) | ||
1444 | { | ||
1445 | } | ||
1446 | |||
1447 | static void ibmveth_proc_unregister_driver(void) | ||
1448 | { | ||
1449 | } | ||
1450 | #endif /* CONFIG_PROC_FS */ | ||
1451 | |||
1452 | static struct attribute veth_active_attr; | 1479 | static struct attribute veth_active_attr; |
1453 | static struct attribute veth_num_attr; | 1480 | static struct attribute veth_num_attr; |
1454 | static struct attribute veth_size_attr; | 1481 | static struct attribute veth_size_attr; |
1455 | 1482 | ||
1456 | static ssize_t veth_pool_show(struct kobject * kobj, | 1483 | static ssize_t veth_pool_show(struct kobject *kobj, |
1457 | struct attribute * attr, char * buf) | 1484 | struct attribute *attr, char *buf) |
1458 | { | 1485 | { |
1459 | struct ibmveth_buff_pool *pool = container_of(kobj, | 1486 | struct ibmveth_buff_pool *pool = container_of(kobj, |
1460 | struct ibmveth_buff_pool, | 1487 | struct ibmveth_buff_pool, |
@@ -1469,8 +1496,8 @@ static ssize_t veth_pool_show(struct kobject * kobj, | |||
1469 | return 0; | 1496 | return 0; |
1470 | } | 1497 | } |
1471 | 1498 | ||
1472 | static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, | 1499 | static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, |
1473 | const char * buf, size_t count) | 1500 | const char *buf, size_t count) |
1474 | { | 1501 | { |
1475 | struct ibmveth_buff_pool *pool = container_of(kobj, | 1502 | struct ibmveth_buff_pool *pool = container_of(kobj, |
1476 | struct ibmveth_buff_pool, | 1503 | struct ibmveth_buff_pool, |
@@ -1484,8 +1511,9 @@ const char * buf, size_t count) | |||
1484 | if (attr == &veth_active_attr) { | 1511 | if (attr == &veth_active_attr) { |
1485 | if (value && !pool->active) { | 1512 | if (value && !pool->active) { |
1486 | if (netif_running(netdev)) { | 1513 | if (netif_running(netdev)) { |
1487 | if(ibmveth_alloc_buffer_pool(pool)) { | 1514 | if (ibmveth_alloc_buffer_pool(pool)) { |
1488 | ibmveth_error_printk("unable to alloc pool\n"); | 1515 | netdev_err(netdev, |
1516 | "unable to alloc pool\n"); | ||
1489 | return -ENOMEM; | 1517 | return -ENOMEM; |
1490 | } | 1518 | } |
1491 | pool->active = 1; | 1519 | pool->active = 1; |
@@ -1494,14 +1522,15 @@ const char * buf, size_t count) | |||
1494 | adapter->pool_config = 0; | 1522 | adapter->pool_config = 0; |
1495 | if ((rc = ibmveth_open(netdev))) | 1523 | if ((rc = ibmveth_open(netdev))) |
1496 | return rc; | 1524 | return rc; |
1497 | } else | 1525 | } else { |
1498 | pool->active = 1; | 1526 | pool->active = 1; |
1527 | } | ||
1499 | } else if (!value && pool->active) { | 1528 | } else if (!value && pool->active) { |
1500 | int mtu = netdev->mtu + IBMVETH_BUFF_OH; | 1529 | int mtu = netdev->mtu + IBMVETH_BUFF_OH; |
1501 | int i; | 1530 | int i; |
1502 | /* Make sure there is a buffer pool with buffers that | 1531 | /* Make sure there is a buffer pool with buffers that |
1503 | can hold a packet of the size of the MTU */ | 1532 | can hold a packet of the size of the MTU */ |
1504 | for (i = 0; i < IbmVethNumBufferPools; i++) { | 1533 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1505 | if (pool == &adapter->rx_buff_pool[i]) | 1534 | if (pool == &adapter->rx_buff_pool[i]) |
1506 | continue; | 1535 | continue; |
1507 | if (!adapter->rx_buff_pool[i].active) | 1536 | if (!adapter->rx_buff_pool[i].active) |
@@ -1510,8 +1539,8 @@ const char * buf, size_t count) | |||
1510 | break; | 1539 | break; |
1511 | } | 1540 | } |
1512 | 1541 | ||
1513 | if (i == IbmVethNumBufferPools) { | 1542 | if (i == IBMVETH_NUM_BUFF_POOLS) { |
1514 | ibmveth_error_printk("no active pool >= MTU\n"); | 1543 | netdev_err(netdev, "no active pool >= MTU\n"); |
1515 | return -EPERM; | 1544 | return -EPERM; |
1516 | } | 1545 | } |
1517 | 1546 | ||
@@ -1526,9 +1555,9 @@ const char * buf, size_t count) | |||
1526 | pool->active = 0; | 1555 | pool->active = 0; |
1527 | } | 1556 | } |
1528 | } else if (attr == &veth_num_attr) { | 1557 | } else if (attr == &veth_num_attr) { |
1529 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) | 1558 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { |
1530 | return -EINVAL; | 1559 | return -EINVAL; |
1531 | else { | 1560 | } else { |
1532 | if (netif_running(netdev)) { | 1561 | if (netif_running(netdev)) { |
1533 | adapter->pool_config = 1; | 1562 | adapter->pool_config = 1; |
1534 | ibmveth_close(netdev); | 1563 | ibmveth_close(netdev); |
@@ -1536,13 +1565,14 @@ const char * buf, size_t count) | |||
1536 | pool->size = value; | 1565 | pool->size = value; |
1537 | if ((rc = ibmveth_open(netdev))) | 1566 | if ((rc = ibmveth_open(netdev))) |
1538 | return rc; | 1567 | return rc; |
1539 | } else | 1568 | } else { |
1540 | pool->size = value; | 1569 | pool->size = value; |
1570 | } | ||
1541 | } | 1571 | } |
1542 | } else if (attr == &veth_size_attr) { | 1572 | } else if (attr == &veth_size_attr) { |
1543 | if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) | 1573 | if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { |
1544 | return -EINVAL; | 1574 | return -EINVAL; |
1545 | else { | 1575 | } else { |
1546 | if (netif_running(netdev)) { | 1576 | if (netif_running(netdev)) { |
1547 | adapter->pool_config = 1; | 1577 | adapter->pool_config = 1; |
1548 | ibmveth_close(netdev); | 1578 | ibmveth_close(netdev); |
@@ -1550,8 +1580,9 @@ const char * buf, size_t count) | |||
1550 | pool->buff_size = value; | 1580 | pool->buff_size = value; |
1551 | if ((rc = ibmveth_open(netdev))) | 1581 | if ((rc = ibmveth_open(netdev))) |
1552 | return rc; | 1582 | return rc; |
1553 | } else | 1583 | } else { |
1554 | pool->buff_size = value; | 1584 | pool->buff_size = value; |
1585 | } | ||
1555 | } | 1586 | } |
1556 | } | 1587 | } |
1557 | 1588 | ||
@@ -1561,16 +1592,16 @@ const char * buf, size_t count) | |||
1561 | } | 1592 | } |
1562 | 1593 | ||
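All three veth_pool_store() branches share one pattern: if the interface is running, close it, update the pool parameter, and reopen so the new pools are (re)allocated; if it is down, just store the value for the next open. A stripped-down model of that flow (error handling from ibmveth_open() is elided):

/* Model of the close/update/reopen pattern in veth_pool_store(). */
#include <stdbool.h>
#include <stdio.h>

static bool running = true;
static int pool_size = 256;

static int set_pool_size(int value)
{
        if (running) {
                running = false;        /* ibmveth_close() */
                pool_size = value;
                running = true;         /* ibmveth_open(); may fail */
        } else {
                pool_size = value;      /* picked up on next open */
        }
        return 0;
}

int main(void)
{
        set_pool_size(512);
        printf("pool_size=%d running=%d\n", pool_size, running);
        return 0;
}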
1563 | 1594 | ||
1564 | #define ATTR(_name, _mode) \ | 1595 | #define ATTR(_name, _mode) \ |
1565 | struct attribute veth_##_name##_attr = { \ | 1596 | struct attribute veth_##_name##_attr = { \ |
1566 | .name = __stringify(_name), .mode = _mode, \ | 1597 | .name = __stringify(_name), .mode = _mode, \ |
1567 | }; | 1598 | }; |
1568 | 1599 | ||
1569 | static ATTR(active, 0644); | 1600 | static ATTR(active, 0644); |
1570 | static ATTR(num, 0644); | 1601 | static ATTR(num, 0644); |
1571 | static ATTR(size, 0644); | 1602 | static ATTR(size, 0644); |
1572 | 1603 | ||
1573 | static struct attribute * veth_pool_attrs[] = { | 1604 | static struct attribute *veth_pool_attrs[] = { |
1574 | &veth_active_attr, | 1605 | &veth_active_attr, |
1575 | &veth_num_attr, | 1606 | &veth_num_attr, |
1576 | &veth_size_attr, | 1607 | &veth_size_attr, |
@@ -1595,7 +1626,7 @@ static int ibmveth_resume(struct device *dev) | |||
1595 | return 0; | 1626 | return 0; |
1596 | } | 1627 | } |
1597 | 1628 | ||
1598 | static struct vio_device_id ibmveth_device_table[] __devinitdata= { | 1629 | static struct vio_device_id ibmveth_device_table[] __devinitdata = { |
1599 | { "network", "IBM,l-lan"}, | 1630 | { "network", "IBM,l-lan"}, |
1600 | { "", "" } | 1631 | { "", "" } |
1601 | }; | 1632 | }; |
@@ -1619,9 +1650,8 @@ static struct vio_driver ibmveth_driver = { | |||
1619 | 1650 | ||
1620 | static int __init ibmveth_module_init(void) | 1651 | static int __init ibmveth_module_init(void) |
1621 | { | 1652 | { |
1622 | ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version); | 1653 | printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name, |
1623 | 1654 | ibmveth_driver_string, ibmveth_driver_version); | |
1624 | ibmveth_proc_register_driver(); | ||
1625 | 1655 | ||
1626 | return vio_register_driver(&ibmveth_driver); | 1656 | return vio_register_driver(&ibmveth_driver); |
1627 | } | 1657 | } |
@@ -1629,7 +1659,6 @@ static int __init ibmveth_module_init(void) | |||
1629 | static void __exit ibmveth_module_exit(void) | 1659 | static void __exit ibmveth_module_exit(void) |
1630 | { | 1660 | { |
1631 | vio_unregister_driver(&ibmveth_driver); | 1661 | vio_unregister_driver(&ibmveth_driver); |
1632 | ibmveth_proc_unregister_driver(); | ||
1633 | } | 1662 | } |
1634 | 1663 | ||
1635 | module_init(ibmveth_module_init); | 1664 | module_init(ibmveth_module_init); |