path: root/drivers/scsi/fcoe
Diffstat (limited to 'drivers/scsi/fcoe')
-rw-r--r--  drivers/scsi/fcoe/Makefile              |    8
-rw-r--r--  drivers/scsi/fcoe/fc_transport_fcoe.c   |  443
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                | 1878
-rw-r--r--  drivers/scsi/fcoe/fcoe.h                |   75
-rw-r--r--  drivers/scsi/fcoe/fcoe_sw.c             |  561
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c             | 2216
6 files changed, 2994 insertions, 2187 deletions
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index b78da06d7c0e..950f27615c76 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,8 +1,2 @@
-# $Id: Makefile
-
 obj-$(CONFIG_FCOE) += fcoe.o
-
-fcoe-y := \
-	libfcoe.o \
-	fcoe_sw.o \
-	fc_transport_fcoe.o
+obj-$(CONFIG_LIBFCOE) += libfcoe.o
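(Editorial note on the hunk above: the fcoe module is now built from fcoe.c alone, libfcoe.c becomes its own libfcoe module selected by CONFIG_LIBFCOE, and fcoe_sw.c plus fc_transport_fcoe.c go away entirely, as the file diffs below show.)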
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
deleted file mode 100644
index 8862758006c0..000000000000
--- a/drivers/scsi/fcoe/fc_transport_fcoe.c
+++ /dev/null
@@ -1,443 +0,0 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/pci.h>
21#include <scsi/libfcoe.h>
22#include <scsi/fc_transport_fcoe.h>
23
24/* internal fcoe transport */
25struct fcoe_transport_internal {
26 struct fcoe_transport *t;
27 struct net_device *netdev;
28 struct list_head list;
29};
30
31/* fcoe transports list and its lock */
32static LIST_HEAD(fcoe_transports);
33static DEFINE_MUTEX(fcoe_transports_lock);
34
35/**
36 * fcoe_transport_default() - Returns ptr to the default transport fcoe_sw
37 */
38struct fcoe_transport *fcoe_transport_default(void)
39{
40 return &fcoe_sw_transport;
41}
42
43/**
44 * fcoe_transport_pcidev() - get the pci dev from a netdev
45 * @netdev: the netdev that the pci dev will be retrieved from
46 *
47 * Returns: NULL or the corresponding pci_dev
48 */
49struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
50{
51 if (!netdev->dev.parent)
52 return NULL;
53 return to_pci_dev(netdev->dev.parent);
54}
55
56/**
57 * fcoe_transport_device_lookup() - Look up a device on a transport
58 * @t: the fcoe transport to search
59 * @netdev: the netdev to look up
60 *
61 * Looks for an existing entry for this netdev on the given transport's
62 * device list.
63 * Returns: the internal transport entry, or NULL if not found
64 */
65static struct fcoe_transport_internal *
66fcoe_transport_device_lookup(struct fcoe_transport *t,
67 struct net_device *netdev)
68{
69 struct fcoe_transport_internal *ti;
70
71 /* search the transport's device list for this netdev */
72 mutex_lock(&t->devlock);
73 list_for_each_entry(ti, &t->devlist, list) {
74 if (ti->netdev == netdev) {
75 mutex_unlock(&t->devlock);
76 return ti;
77 }
78 }
79 mutex_unlock(&t->devlock);
80 return NULL;
81}
82/**
83 * fcoe_transport_device_add() - Assign a transport to a device
84 * @t: the fcoe transport to attach the device to
85 * @netdev: the netdev to be attached to the transport
86 *
87 * Adds the netdev to the transport's device list and holds a reference on it.
88 *
89 * Returns: 0 for success
90 */
91static int fcoe_transport_device_add(struct fcoe_transport *t,
92 struct net_device *netdev)
93{
94 struct fcoe_transport_internal *ti;
95
96 ti = fcoe_transport_device_lookup(t, netdev);
97 if (ti) {
98 printk(KERN_DEBUG "fcoe_transport_device_add:"
99 "device %s is already added to transport %s\n",
100 netdev->name, t->name);
101 return -EEXIST;
102 }
103 /* allocate an internal struct to host the netdev and the list */
104 ti = kzalloc(sizeof(*ti), GFP_KERNEL);
105 if (!ti)
106 return -ENOMEM;
107
108 ti->t = t;
109 ti->netdev = netdev;
110 INIT_LIST_HEAD(&ti->list);
111 dev_hold(ti->netdev);
112
113 mutex_lock(&t->devlock);
114 list_add(&ti->list, &t->devlist);
115 mutex_unlock(&t->devlock);
116
117 printk(KERN_DEBUG "fcoe_transport_device_add:"
118 "device %s added to transport %s\n",
119 netdev->name, t->name);
120
121 return 0;
122}
123
124/**
125 * fcoe_transport_device_remove() - Remove a device from its transport
126 * @netdev: the netdev to be removed from the transport
127 *
128 * This removes the device from the transport so the given transport will
129 * not manage this device any more
130 *
131 * Returns: 0 for success
132 */
133static int fcoe_transport_device_remove(struct fcoe_transport *t,
134 struct net_device *netdev)
135{
136 struct fcoe_transport_internal *ti;
137
138 ti = fcoe_transport_device_lookup(t, netdev);
139 if (!ti) {
140 printk(KERN_DEBUG "fcoe_transport_device_remove:"
141 "device %s is not managed by transport %s\n",
142 netdev->name, t->name);
143 return -ENODEV;
144 }
145 mutex_lock(&t->devlock);
146 list_del(&ti->list);
147 mutex_unlock(&t->devlock);
148 printk(KERN_DEBUG "fcoe_transport_device_remove:"
149 "device %s removed from transport %s\n",
150 netdev->name, t->name);
151 dev_put(ti->netdev);
152 kfree(ti);
153 return 0;
154}
155
156/**
157 * fcoe_transport_device_remove_all() - Remove all from transport devlist
158 *
159 * This removes every device from the given transport's device list so the
160 * transport no longer manages any of them
161 *
162 * Returns: none
163 */
164static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
165{
166 struct fcoe_transport_internal *ti, *tmp;
167
168 mutex_lock(&t->devlock);
169 list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
170 list_del(&ti->list);
171 kfree(ti);
172 }
173 mutex_unlock(&t->devlock);
174}
175
176/**
177 * fcoe_transport_match() - Use the bus device match function to match the hw
178 * @t: The fcoe transport to check
179 * @netdev: The netdev to match against
180 *
181 * This function checks whether the given transport wants to manage the
182 * input netdev. If the transport implements a match function it is called;
183 * otherwise the PCI vendor and device IDs are compared.
184 *
185 * Returns: true on a match
186 */
187static bool fcoe_transport_match(struct fcoe_transport *t,
188 struct net_device *netdev)
189{
190 /* match transport by vendor and device id */
191 struct pci_dev *pci;
192
193 pci = fcoe_transport_pcidev(netdev);
194
195 if (pci) {
196 printk(KERN_DEBUG "fcoe_transport_match:"
197 "%s:%x:%x -- %s:%x:%x\n",
198 t->name, t->vendor, t->device,
199 netdev->name, pci->vendor, pci->device);
200
201 /* if transport supports match */
202 if (t->match)
203 return t->match(netdev);
204
205 /* else just compare the vendor and device id: pci only */
206 return (t->vendor == pci->vendor) && (t->device == pci->device);
207 }
208 return false;
209}
210
211/**
212 * fcoe_transport_lookup() - Check if the transport is already registered
213 * @t: the transport to be looked up
214 *
215 * This compares the parent device (pci) vendor and device id
216 *
217 * Returns: NULL if not found
218 *
219 * TODO: return default sw transport if no other transport is found
220 */
221static struct fcoe_transport *
222fcoe_transport_lookup(struct net_device *netdev)
223{
224 struct fcoe_transport *t;
225
226 mutex_lock(&fcoe_transports_lock);
227 list_for_each_entry(t, &fcoe_transports, list) {
228 if (fcoe_transport_match(t, netdev)) {
229 mutex_unlock(&fcoe_transports_lock);
230 return t;
231 }
232 }
233 mutex_unlock(&fcoe_transports_lock);
234
235 printk(KERN_DEBUG "fcoe_transport_lookup:"
236 "use default transport for %s\n", netdev->name);
237 return fcoe_transport_default();
238}
239
240/**
241 * fcoe_transport_register() - Adds a fcoe transport to the fcoe transports list
242 * @t: ptr to the fcoe transport to be added
243 *
244 * Returns: 0 for success
245 */
246int fcoe_transport_register(struct fcoe_transport *t)
247{
248 struct fcoe_transport *tt;
249
250 /* TODO - add fcoe_transport specific initialization here */
251 mutex_lock(&fcoe_transports_lock);
252 list_for_each_entry(tt, &fcoe_transports, list) {
253 if (tt == t) {
254 mutex_unlock(&fcoe_transports_lock);
255 return -EEXIST;
256 }
257 }
258 list_add_tail(&t->list, &fcoe_transports);
259 mutex_unlock(&fcoe_transports_lock);
260
261 printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
262
263 return 0;
264}
265EXPORT_SYMBOL_GPL(fcoe_transport_register);
266
267/**
268 * fcoe_transport_unregister() - Remove the transport from the fcoe transports list
269 * @t: ptr to the fcoe transport to be removed
270 *
271 * Returns: 0 for success
272 */
273int fcoe_transport_unregister(struct fcoe_transport *t)
274{
275 struct fcoe_transport *tt, *tmp;
276
277 mutex_lock(&fcoe_transports_lock);
278 list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
279 if (tt == t) {
280 list_del(&t->list);
281 mutex_unlock(&fcoe_transports_lock);
282 fcoe_transport_device_remove_all(t);
283 printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
284 t->name);
285 return 0;
286 }
287 }
288 mutex_unlock(&fcoe_transports_lock);
289 return -ENODEV;
290}
291EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
292
293/**
294 * fcoe_load_transport_driver() - Load an offload driver by alias name
295 * @netdev: the target net device
296 *
297 * Requests an offload driver module to act as the fcoe transport; if that
298 * fails, the SW HBA (fcoe_sw) is used as the transport
299 *
300 * TODO -
301 * 1. supports only PCI devices
302 * 2. needs fixes for VLAN and bonding
303 * 3. pure hw fcoe hba may not have netdev
304 *
305 * Returns: 0 for success
306 */
307int fcoe_load_transport_driver(struct net_device *netdev)
308{
309 struct pci_dev *pci;
310 struct device *dev = netdev->dev.parent;
311
312 if (fcoe_transport_lookup(netdev)) {
313 /* a transport is already associated with this device */
314 printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
315 netdev->name);
316 return -EEXIST;
317 }
318
319 pci = to_pci_dev(dev);
320 if (dev->bus != &pci_bus_type) {
321 printk(KERN_DEBUG "fcoe: only PCI devices are supported\n");
322 return -ENODEV;
323 }
324 printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
325 pci->vendor, pci->device);
326
327 return request_module("fcoe-pci-0x%04x-0x%04x",
328 pci->vendor, pci->device);
329
330}
331EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
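(Editorial note: the request_module() call above works only if an offload driver advertises a module alias of the form fcoe-pci-0xVVVV-0xDDDD built from its PCI vendor and device IDs, e.g. MODULE_ALIAS("fcoe-pci-0x8086-0x10fb") -- the IDs shown are purely illustrative. When no such module exists the request fails and, per the comment above, the lookup later falls back to the default fcoe_sw transport.)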
332
333/**
334 * fcoe_transport_attach() - Load transport to fcoe
335 * @netdev: the netdev the transport to be attached to
336 *
337 * This will look for existing offload driver, if not found, it falls back to
338 * the default sw hba (fcoe_sw) as its fcoe transport.
339 *
340 * Returns: 0 for success
341 */
342int fcoe_transport_attach(struct net_device *netdev)
343{
344 struct fcoe_transport *t;
345
346 /* find the corresponding transport */
347 t = fcoe_transport_lookup(netdev);
348 if (!t) {
349 printk(KERN_DEBUG "fcoe_transport_attach"
350 ":no transport found for %s\n",
351 netdev->name);
352 return -ENODEV;
353 }
354 /* add to the transport */
355 if (fcoe_transport_device_add(t, netdev)) {
356 printk(KERN_DEBUG "fcoe_transport_attach"
357 ":failed to add %s to transport %s\n",
358 netdev->name, t->name);
359 return -EIO;
360 }
361 /* transport create function */
362 if (t->create)
363 t->create(netdev);
364
365 printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
366 t->name, netdev->name);
367 return 0;
368}
369EXPORT_SYMBOL_GPL(fcoe_transport_attach);
370
371/**
372 * fcoe_transport_release() - Unload transport from fcoe
373 * @netdev: the net device on which fcoe is to be released
374 *
375 * Returns: 0 for success
376 */
377int fcoe_transport_release(struct net_device *netdev)
378{
379 struct fcoe_transport *t;
380
381 /* find the corresponding transport */
382 t = fcoe_transport_lookup(netdev);
383 if (!t) {
384 printk(KERN_DEBUG "fcoe_transport_release:"
385 "no transport found for %s\n",
386 netdev->name);
387 return -ENODEV;
388 }
389 /* remove the device from the transport */
390 if (fcoe_transport_device_remove(t, netdev)) {
391 printk(KERN_DEBUG "fcoe_transport_release:"
392 "failed to remove %s from transport %s\n",
393 netdev->name, t->name);
394 return -EIO;
395 }
396 /* transport destroy function */
397 if (t->destroy)
398 t->destroy(netdev);
399
400 printk(KERN_DEBUG "fcoe_transport_release:"
401 "device %s detached from transport %s\n",
402 netdev->name, t->name);
403
404 return 0;
405}
406EXPORT_SYMBOL_GPL(fcoe_transport_release);
407
408/**
409 * fcoe_transport_init() - Initializes fcoe transport layer
410 *
411 * This prepares for the fcoe transport layer
412 *
413 * Returns: 0 for success
414 */
415int __init fcoe_transport_init(void)
416{
417 INIT_LIST_HEAD(&fcoe_transports);
418 mutex_init(&fcoe_transports_lock);
419 return 0;
420}
421
422/**
423 * fcoe_transport_exit() - Cleans up the fcoe transport layer
424 *
425 * This cleans up the fcoe transport layer, removing every transport from the
426 * list; note that the transports' destroy functions are not called here.
427 *
428 * Returns: 0 for success
429 */
430int __exit fcoe_transport_exit(void)
431{
432 struct fcoe_transport *t, *tmp;
433
434 mutex_lock(&fcoe_transports_lock);
435 list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
436 list_del(&t->list);
437 mutex_unlock(&fcoe_transports_lock);
438 fcoe_transport_device_remove_all(t);
439 mutex_lock(&fcoe_transports_lock);
440 }
441 mutex_unlock(&fcoe_transports_lock);
442 return 0;
443}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
new file mode 100644
index 000000000000..94e1e3189773
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -0,0 +1,1878 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/spinlock.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/ethtool.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/crc32.h>
29#include <linux/cpu.h>
30#include <linux/fs.h>
31#include <linux/sysfs.h>
32#include <linux/ctype.h>
33#include <scsi/scsi_tcq.h>
34#include <scsi/scsicam.h>
35#include <scsi/scsi_transport.h>
36#include <scsi/scsi_transport_fc.h>
37#include <net/rtnetlink.h>
38
39#include <scsi/fc/fc_encaps.h>
40#include <scsi/fc/fc_fip.h>
41
42#include <scsi/libfc.h>
43#include <scsi/fc_frame.h>
44#include <scsi/libfcoe.h>
45
46#include "fcoe.h"
47
48static int debug_fcoe;
49
50MODULE_AUTHOR("Open-FCoE.org");
51MODULE_DESCRIPTION("FCoE");
52MODULE_LICENSE("GPL v2");
53
54/* fcoe host list */
55LIST_HEAD(fcoe_hostlist);
56DEFINE_RWLOCK(fcoe_hostlist_lock);
57DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
59
60/* Function Prototypes */
61static int fcoe_reset(struct Scsi_Host *shost);
62static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
63static int fcoe_rcv(struct sk_buff *, struct net_device *,
64 struct packet_type *, struct net_device *);
65static int fcoe_percpu_receive_thread(void *arg);
66static void fcoe_clean_pending_queue(struct fc_lport *lp);
67static void fcoe_percpu_clean(struct fc_lport *lp);
68static int fcoe_link_ok(struct fc_lport *lp);
69
70static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71static int fcoe_hostlist_add(const struct fc_lport *);
72static int fcoe_hostlist_remove(const struct fc_lport *);
73
74static int fcoe_check_wait_queue(struct fc_lport *);
75static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76static void fcoe_dev_setup(void);
77static void fcoe_dev_cleanup(void);
78
79/* notification function from net device */
80static struct notifier_block fcoe_notifier = {
81 .notifier_call = fcoe_device_notification,
82};
83
84static struct scsi_transport_template *scsi_transport_fcoe_sw;
85
86struct fc_function_template fcoe_transport_function = {
87 .show_host_node_name = 1,
88 .show_host_port_name = 1,
89 .show_host_supported_classes = 1,
90 .show_host_supported_fc4s = 1,
91 .show_host_active_fc4s = 1,
92 .show_host_maxframe_size = 1,
93
94 .show_host_port_id = 1,
95 .show_host_supported_speeds = 1,
96 .get_host_speed = fc_get_host_speed,
97 .show_host_speed = 1,
98 .show_host_port_type = 1,
99 .get_host_port_state = fc_get_host_port_state,
100 .show_host_port_state = 1,
101 .show_host_symbolic_name = 1,
102
103 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
104 .show_rport_maxframe_size = 1,
105 .show_rport_supported_classes = 1,
106
107 .show_host_fabric_name = 1,
108 .show_starget_node_name = 1,
109 .show_starget_port_name = 1,
110 .show_starget_port_id = 1,
111 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
112 .show_rport_dev_loss_tmo = 1,
113 .get_fc_host_stats = fc_get_host_stats,
114 .issue_fc_host_lip = fcoe_reset,
115
116 .terminate_rport_io = fc_rport_terminate_io,
117};
118
119static struct scsi_host_template fcoe_shost_template = {
120 .module = THIS_MODULE,
121 .name = "FCoE Driver",
122 .proc_name = FCOE_NAME,
123 .queuecommand = fc_queuecommand,
124 .eh_abort_handler = fc_eh_abort,
125 .eh_device_reset_handler = fc_eh_device_reset,
126 .eh_host_reset_handler = fc_eh_host_reset,
127 .slave_alloc = fc_slave_alloc,
128 .change_queue_depth = fc_change_queue_depth,
129 .change_queue_type = fc_change_queue_type,
130 .this_id = -1,
131 .cmd_per_lun = 32,
132 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
133 .use_clustering = ENABLE_CLUSTERING,
134 .sg_tablesize = SG_ALL,
135 .max_sectors = 0xffff,
136};
137
138/**
139 * fcoe_lport_config() - sets up the fc_lport
140 * @lp: ptr to the fc_lport
142 *
143 * Returns: 0 for success
144 */
145static int fcoe_lport_config(struct fc_lport *lp)
146{
147 lp->link_up = 0;
148 lp->qfull = 0;
149 lp->max_retry_count = 3;
150 lp->e_d_tov = 2 * 1000; /* FC-FS default */
151 lp->r_a_tov = 2 * 2 * 1000;
152 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
153 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
154
155 fc_lport_init_stats(lp);
156
157 /* lport fc_lport related configuration */
158 fc_lport_config(lp);
159
160 /* offload related configuration */
161 lp->crc_offload = 0;
162 lp->seq_offload = 0;
163 lp->lro_enabled = 0;
164 lp->lro_xid = 0;
165 lp->lso_max = 0;
166
167 return 0;
168}
169
170/**
171 * fcoe_netdev_config() - Set up netdev for SW FCoE
172 * @lp : ptr to the fc_lport
173 * @netdev : ptr to the associated netdevice struct
174 *
175 * Must be called after fcoe_lport_config() as it will use lport mutex
176 *
177 * Returns : 0 for success
178 */
179static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
180{
181 u32 mfs;
182 u64 wwnn, wwpn;
183 struct fcoe_softc *fc;
184 u8 flogi_maddr[ETH_ALEN];
185
186 /* Setup lport private data to point to fcoe softc */
187 fc = lport_priv(lp);
188 fc->ctlr.lp = lp;
189 fc->real_dev = netdev;
190 fc->phys_dev = netdev;
191
192 /* Require support for get_pauseparam ethtool op. */
193 if (netdev->priv_flags & IFF_802_1Q_VLAN)
194 fc->phys_dev = vlan_dev_real_dev(netdev);
195
196 /* bonding devices are not supported */
197 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
198 (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
199 (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
200 return -EOPNOTSUPP;
201 }
202
203 /*
204 * Determine max frame size based on underlying device and optional
205 * user-configured limit. If the MFS is too low, fcoe_link_ok()
206 * will return 0, so do this first.
207 */
208 mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
209 sizeof(struct fcoe_crc_eof));
210 if (fc_set_mfs(lp, mfs))
211 return -EINVAL;
212
213 /* offload features support */
214 if (fc->real_dev->features & NETIF_F_SG)
215 lp->sg_supp = 1;
216
217#ifdef NETIF_F_FCOE_CRC
218 if (netdev->features & NETIF_F_FCOE_CRC) {
219 lp->crc_offload = 1;
220 printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
221 netdev->name);
222 }
223#endif
224#ifdef NETIF_F_FSO
225 if (netdev->features & NETIF_F_FSO) {
226 lp->seq_offload = 1;
227 lp->lso_max = netdev->gso_max_size;
228 printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
229 netdev->name, lp->lso_max);
230 }
231#endif
232 if (netdev->fcoe_ddp_xid) {
233 lp->lro_enabled = 1;
234 lp->lro_xid = netdev->fcoe_ddp_xid;
235 printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
236 netdev->name, lp->lro_xid);
237 }
238 skb_queue_head_init(&fc->fcoe_pending_queue);
239 fc->fcoe_pending_queue_active = 0;
240
241 /* setup Source Mac Address */
242 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
243 fc->real_dev->addr_len);
244
245 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
246 fc_set_wwnn(lp, wwnn);
247 /* XXX - 3rd arg needs to be vlan id */
248 wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
249 fc_set_wwpn(lp, wwpn);
250
251 /*
252 * Add FCoE MAC address as second unicast MAC address
253 * or enter promiscuous mode if not capable of listening
254 * for multiple unicast MACs.
255 */
256 rtnl_lock();
257 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
258 dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
259 rtnl_unlock();
260
261 /*
262 * setup the receive function from ethernet driver
263 * on the ethertype for the given device
264 */
265 fc->fcoe_packet_type.func = fcoe_rcv;
266 fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
267 fc->fcoe_packet_type.dev = fc->real_dev;
268 dev_add_pack(&fc->fcoe_packet_type);
269
270 return 0;
271}
272
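(Editorial aside, not part of the patch: the MFS computed in fcoe_netdev_config() is simply the Ethernet MTU minus the 14-byte FCoE encapsulation header and the 8-byte CRC/EOF trailer, so a 2500-byte mini-jumbo MTU leaves 2478 bytes for the FC frame. The WWNN/WWPN values come from libfcoe's fcoe_wwn_from_mac(); the userspace sketch below shows the general NAA-style packing, with the exact field layout treated as an assumption rather than taken from the patch.)

#include <stdint.h>
#include <stdio.h>

/* Sketch only: pack a 48-bit MAC into a 64-bit WWN with a scheme nibble in
 * the top bits and an optional port qualifier (assumed layout; see
 * fcoe_wwn_from_mac() in libfcoe.c for the authoritative version). */
static uint64_t wwn_from_mac(const uint8_t mac[6], unsigned int scheme,
			     unsigned int port)
{
	uint64_t wwn = 0;
	int i;

	for (i = 0; i < 6; i++)
		wwn = (wwn << 8) | mac[i];	/* low 48 bits carry the MAC */
	wwn |= (uint64_t)scheme << 60;		/* scheme/NAA nibble */
	if (scheme == 2)
		wwn |= (uint64_t)port << 48;	/* port qualifier, WWPN only */
	return wwn;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x01, 0x02, 0x03 };

	printf("wwnn 0x%016llx\n", (unsigned long long)wwn_from_mac(mac, 1, 0));
	printf("wwpn 0x%016llx\n", (unsigned long long)wwn_from_mac(mac, 2, 0));
	return 0;
}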
273/**
274 * fcoe_shost_config() - Sets up fc_lport->host
275 * @lp : ptr to the fc_lport
276 * @shost : ptr to the associated scsi host
277 * @dev : device associated to scsi host
278 *
279 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
280 *
281 * Returns : 0 for success
282 */
283static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
284 struct device *dev)
285{
286 int rc = 0;
287
288 /* lport scsi host config */
289 lp->host = shost;
290
291 lp->host->max_lun = FCOE_MAX_LUN;
292 lp->host->max_id = FCOE_MAX_FCP_TARGET;
293 lp->host->max_channel = 0;
294 lp->host->transportt = scsi_transport_fcoe_sw;
295
296 /* add the new host to the SCSI-ml */
297 rc = scsi_add_host(lp->host, dev);
298 if (rc) {
299 FC_DBG("fcoe_shost_config:error on scsi_add_host\n");
300 return rc;
301 }
302 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
303 FCOE_NAME, FCOE_VERSION,
304 fcoe_netdev(lp)->name);
305
306 return 0;
307}
308
309/**
310 * fcoe_em_config() - allocates em for this lport
311 * @lp: the port that em is to allocated for
312 *
313 * Returns : 0 on success
314 */
315static inline int fcoe_em_config(struct fc_lport *lp)
316{
317 BUG_ON(lp->emp);
318
319 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
320 FCOE_MIN_XID, FCOE_MAX_XID);
321 if (!lp->emp)
322 return -ENOMEM;
323
324 return 0;
325}
326
327/**
328 * fcoe_if_destroy() - FCoE software HBA tear-down function
329 * @netdev: ptr to the associated net_device
330 *
331 * Returns: 0 on success
332 */
333static int fcoe_if_destroy(struct net_device *netdev)
334{
335 struct fc_lport *lp = NULL;
336 struct fcoe_softc *fc;
337 u8 flogi_maddr[ETH_ALEN];
338
339 BUG_ON(!netdev);
340
341 printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n",
342 netdev->name);
343
344 lp = fcoe_hostlist_lookup(netdev);
345 if (!lp)
346 return -ENODEV;
347
348 fc = lport_priv(lp);
349
350 /* Logout of the fabric */
351 fc_fabric_logoff(lp);
352
353 /* Remove the instance from fcoe's list */
354 fcoe_hostlist_remove(lp);
355
356 /* Don't listen for Ethernet packets anymore */
357 dev_remove_pack(&fc->fcoe_packet_type);
358 dev_remove_pack(&fc->fip_packet_type);
359 fcoe_ctlr_destroy(&fc->ctlr);
360
361 /* Cleanup the fc_lport */
362 fc_lport_destroy(lp);
363 fc_fcp_destroy(lp);
364
365 /* Detach from the scsi-ml */
366 fc_remove_host(lp->host);
367 scsi_remove_host(lp->host);
368
369 /* There are no more rports or I/O, free the EM */
370 if (lp->emp)
371 fc_exch_mgr_free(lp->emp);
372
373 /* Delete secondary MAC addresses */
374 rtnl_lock();
375 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
376 dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
377 if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
378 dev_unicast_delete(fc->real_dev,
379 fc->ctlr.data_src_addr, ETH_ALEN);
380 dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
381 rtnl_unlock();
382
383 /* Flush this lport's skbs from the per-CPU receive threads */
384 fcoe_percpu_clean(lp);
385
386 /* Free existing skbs */
387 fcoe_clean_pending_queue(lp);
388
389 /* Free memory used by statistical counters */
390 fc_lport_free_stats(lp);
391
392 /* Release the net_device and Scsi_Host */
393 dev_put(fc->real_dev);
394 scsi_host_put(lp->host);
395
396 return 0;
397}
398
399/*
400 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
401 * @lp: the corresponding fc_lport
402 * @xid: the exchange id for this ddp transfer
403 * @sgl: the scatterlist describing this transfer
404 * @sgc: number of sg items
405 *
406 * Returns: the LLD's ndo_fcoe_ddp_setup() result, or 0 if DDP is not supported
407 */
408static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
409 struct scatterlist *sgl, unsigned int sgc)
410{
411 struct net_device *n = fcoe_netdev(lp);
412
413 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
414 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
415
416 return 0;
417}
418
419/*
420 * fcoe_ddp_done - calls LLD's ddp_done through net_device
421 * @lp: the corresponding fc_lport
422 * @xid: the exchange id for this ddp transfer
423 *
424 * Returns : the length of data that have been completed by ddp
425 */
426static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
427{
428 struct net_device *n = fcoe_netdev(lp);
429
430 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
431 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
432 return 0;
433}
434
435static struct libfc_function_template fcoe_libfc_fcn_templ = {
436 .frame_send = fcoe_xmit,
437 .ddp_setup = fcoe_ddp_setup,
438 .ddp_done = fcoe_ddp_done,
439};
440
441/**
442 * fcoe_fip_recv - handle a received FIP frame.
443 * @skb: the receive skb
444 * @dev: associated &net_device
445 * @ptype: the &packet_type structure which was used to register this handler.
446 * @orig_dev: original receive &net_device, in case @dev is a bond.
447 *
448 * Returns: 0 for success
449 */
450static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
451 struct packet_type *ptype,
452 struct net_device *orig_dev)
453{
454 struct fcoe_softc *fc;
455
456 fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
457 fcoe_ctlr_recv(&fc->ctlr, skb);
458 return 0;
459}
460
461/**
462 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
463 * @fip: FCoE controller.
464 * @skb: FIP Packet.
465 */
466static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
467{
468 skb->dev = fcoe_from_ctlr(fip)->real_dev;
469 dev_queue_xmit(skb);
470}
471
472/**
473 * fcoe_update_src_mac() - Update Ethernet MAC filters.
474 * @fip: FCoE controller.
475 * @old: Unicast MAC address to delete if the MAC is non-zero.
476 * @new: Unicast MAC address to add.
477 *
478 * Remove any previously-set unicast MAC filter.
479 * Add secondary FCoE MAC address filter for our OUI.
480 */
481static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
482{
483 struct fcoe_softc *fc;
484
485 fc = fcoe_from_ctlr(fip);
486 rtnl_lock();
487 if (!is_zero_ether_addr(old))
488 dev_unicast_delete(fc->real_dev, old, ETH_ALEN);
489 dev_unicast_add(fc->real_dev, new, ETH_ALEN);
490 rtnl_unlock();
491}
492
493/**
494 * fcoe_if_create() - this function creates the fcoe interface
495 * @netdev: pointer the associated netdevice
496 *
497 * Creates fc_lport struct and scsi_host for lport, configures lport
498 * and starts fabric login.
499 *
500 * Returns : 0 on success
501 */
502static int fcoe_if_create(struct net_device *netdev)
503{
504 int rc;
505 struct fc_lport *lp = NULL;
506 struct fcoe_softc *fc;
507 struct Scsi_Host *shost;
508
509 BUG_ON(!netdev);
510
511 printk(KERN_DEBUG "fcoe_if_create:interface on %s\n",
512 netdev->name);
513
514 lp = fcoe_hostlist_lookup(netdev);
515 if (lp)
516 return -EEXIST;
517
518 shost = libfc_host_alloc(&fcoe_shost_template,
519 sizeof(struct fcoe_softc));
520 if (!shost) {
521 FC_DBG("Could not allocate host structure\n");
522 return -ENOMEM;
523 }
524 lp = shost_priv(shost);
525 fc = lport_priv(lp);
526
527 /* configure fc_lport, e.g., em */
528 rc = fcoe_lport_config(lp);
529 if (rc) {
530 FC_DBG("Could not configure lport\n");
531 goto out_host_put;
532 }
533
534 /* configure lport network properties */
535 rc = fcoe_netdev_config(lp, netdev);
536 if (rc) {
537 FC_DBG("Could not configure netdev for lport\n");
538 goto out_host_put;
539 }
540
541 /*
542 * Initialize FIP.
543 */
544 fcoe_ctlr_init(&fc->ctlr);
545 fc->ctlr.send = fcoe_fip_send;
546 fc->ctlr.update_mac = fcoe_update_src_mac;
547
548 fc->fip_packet_type.func = fcoe_fip_recv;
549 fc->fip_packet_type.type = htons(ETH_P_FIP);
550 fc->fip_packet_type.dev = fc->real_dev;
551 dev_add_pack(&fc->fip_packet_type);
552
553 /* configure lport scsi host properties */
554 rc = fcoe_shost_config(lp, shost, &netdev->dev);
555 if (rc) {
556 FC_DBG("Could not configure shost for lport\n");
557 goto out_host_put;
558 }
559
560 /* lport exch manager allocation */
561 rc = fcoe_em_config(lp);
562 if (rc) {
563 FC_DBG("Could not configure em for lport\n");
564 goto out_host_put;
565 }
566
567 /* Initialize the library */
568 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
569 if (rc) {
570 FC_DBG("Could not configure libfc for lport!\n");
571 goto out_lp_destroy;
572 }
573
574 /* add to lports list */
575 fcoe_hostlist_add(lp);
576
577 lp->boot_time = jiffies;
578
579 fc_fabric_login(lp);
580
581 if (!fcoe_link_ok(lp))
582 fcoe_ctlr_link_up(&fc->ctlr);
583
584 dev_hold(netdev);
585
586 return rc;
587
588out_lp_destroy:
589 fc_exch_mgr_free(lp->emp); /* Free the EM */
590out_host_put:
591 scsi_host_put(lp->host);
592 return rc;
593}
594
595/**
596 * fcoe_if_init() - attach to scsi transport
597 *
598 * Returns : 0 on success
599 */
600static int __init fcoe_if_init(void)
601{
602 /* attach to scsi transport */
603 scsi_transport_fcoe_sw =
604 fc_attach_transport(&fcoe_transport_function);
605
606 if (!scsi_transport_fcoe_sw) {
607 printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n");
608 return -ENODEV;
609 }
610
611 return 0;
612}
613
614/**
615 * fcoe_if_exit() - detach from scsi transport
616 *
617 * Returns : 0 on success
618 */
619int __exit fcoe_if_exit(void)
620{
621 fc_release_transport(scsi_transport_fcoe_sw);
622 return 0;
623}
624
625/**
626 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
627 * @cpu: cpu index for the online cpu
628 */
629static void fcoe_percpu_thread_create(unsigned int cpu)
630{
631 struct fcoe_percpu_s *p;
632 struct task_struct *thread;
633
634 p = &per_cpu(fcoe_percpu, cpu);
635
636 thread = kthread_create(fcoe_percpu_receive_thread,
637 (void *)p, "fcoethread/%d", cpu);
638
639 if (likely(!IS_ERR(thread))) {
640 kthread_bind(thread, cpu);
641 wake_up_process(thread);
642
643 spin_lock_bh(&p->fcoe_rx_list.lock);
644 p->thread = thread;
645 spin_unlock_bh(&p->fcoe_rx_list.lock);
646 }
647}
648
649/**
650 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
651 * @cpu: cpu index the rx thread is to be removed
652 *
653 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
654 * current CPU's Rx thread. If the thread being destroyed is bound to
655 * the CPU processing this context the skbs will be freed.
656 */
657static void fcoe_percpu_thread_destroy(unsigned int cpu)
658{
659 struct fcoe_percpu_s *p;
660 struct task_struct *thread;
661 struct page *crc_eof;
662 struct sk_buff *skb;
663#ifdef CONFIG_SMP
664 struct fcoe_percpu_s *p0;
665 unsigned targ_cpu = smp_processor_id();
666#endif /* CONFIG_SMP */
667
668 printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);
669
670 /* Prevent any new skbs from being queued for this CPU. */
671 p = &per_cpu(fcoe_percpu, cpu);
672 spin_lock_bh(&p->fcoe_rx_list.lock);
673 thread = p->thread;
674 p->thread = NULL;
675 crc_eof = p->crc_eof_page;
676 p->crc_eof_page = NULL;
677 p->crc_eof_offset = 0;
678 spin_unlock_bh(&p->fcoe_rx_list.lock);
679
680#ifdef CONFIG_SMP
681 /*
682 * Don't bother moving the skb's if this context is running
683 * on the same CPU that is having its thread destroyed. This
684 * can easily happen when the module is removed.
685 */
686 if (cpu != targ_cpu) {
687 p0 = &per_cpu(fcoe_percpu, targ_cpu);
688 spin_lock_bh(&p0->fcoe_rx_list.lock);
689 if (p0->thread) {
690 FC_DBG("Moving frames from CPU %d to CPU %d\n",
691 cpu, targ_cpu);
692
693 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
694 __skb_queue_tail(&p0->fcoe_rx_list, skb);
695 spin_unlock_bh(&p0->fcoe_rx_list.lock);
696 } else {
697 /*
698 * The targeted CPU is not initialized and cannot accept
699 * new skbs. Unlock the targeted CPU and drop the skbs
700 * on the CPU that is going offline.
701 */
702 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
703 kfree_skb(skb);
704 spin_unlock_bh(&p0->fcoe_rx_list.lock);
705 }
706 } else {
707 /*
708 * This scenario occurs when the module is being removed
709 * and all threads are being destroyed. skbs will continue
710 * to be shifted from the CPU thread that is being removed
711 * to the CPU thread associated with the CPU that is processing
712 * the module removal. Once there is only one CPU Rx thread it
713 * will reach this case and we will drop all skbs and later
714 * stop the thread.
715 */
716 spin_lock_bh(&p->fcoe_rx_list.lock);
717 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
718 kfree_skb(skb);
719 spin_unlock_bh(&p->fcoe_rx_list.lock);
720 }
721#else
722 /*
723 * This is a non-SMP scenario where the single Rx thread is
724 * being removed. Free all skbs and stop the thread.
725 */
726 spin_lock_bh(&p->fcoe_rx_list.lock);
727 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
728 kfree_skb(skb);
729 spin_unlock_bh(&p->fcoe_rx_list.lock);
730#endif
731
732 if (thread)
733 kthread_stop(thread);
734
735 if (crc_eof)
736 put_page(crc_eof);
737}
738
739/**
740 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
741 * @nfb: callback data block
742 * @action: event triggering the callback
743 * @hcpu: index for the cpu of this event
744 *
745 * This creates or destroys per cpu data for fcoe
746 *
747 * Returns NOTIFY_OK always.
748 */
749static int fcoe_cpu_callback(struct notifier_block *nfb,
750 unsigned long action, void *hcpu)
751{
752 unsigned cpu = (unsigned long)hcpu;
753
754 switch (action) {
755 case CPU_ONLINE:
756 case CPU_ONLINE_FROZEN:
757 FC_DBG("CPU %x online: Create Rx thread\n", cpu);
758 fcoe_percpu_thread_create(cpu);
759 break;
760 case CPU_DEAD:
761 case CPU_DEAD_FROZEN:
762 FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
763 fcoe_percpu_thread_destroy(cpu);
764 break;
765 default:
766 break;
767 }
768 return NOTIFY_OK;
769}
770
771static struct notifier_block fcoe_cpu_notifier = {
772 .notifier_call = fcoe_cpu_callback,
773};
774
775/**
776 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
777 * @skb: the receive skb
778 * @dev: associated net device
779 * @ptype: context
780 * @olddev: the original receiving net device
781 *
782 * this function will receive the packet and build fc frame and pass it up
783 *
784 * Returns: 0 for success
785 */
786int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
787 struct packet_type *ptype, struct net_device *olddev)
788{
789 struct fc_lport *lp;
790 struct fcoe_rcv_info *fr;
791 struct fcoe_softc *fc;
792 struct fc_frame_header *fh;
793 struct fcoe_percpu_s *fps;
794 unsigned short oxid;
795 unsigned int cpu = 0;
796
797 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
798 lp = fc->ctlr.lp;
799 if (unlikely(lp == NULL)) {
800 FC_DBG("cannot find hba structure");
801 goto err2;
802 }
803 if (!lp->link_up)
804 goto err2;
805
806 if (unlikely(debug_fcoe)) {
807 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
808 "end:%p sum:%d dev:%s", skb->len, skb->data_len,
809 skb->head, skb->data, skb_tail_pointer(skb),
810 skb_end_pointer(skb), skb->csum,
811 skb->dev ? skb->dev->name : "<NULL>");
812
813 }
814
815 /* check for FCOE packet type */
816 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
817 FC_DBG("wrong FC type frame");
818 goto err;
819 }
820
821 /*
822 * Check for minimum frame length, and make sure required FCoE
823 * and FC headers are pulled into the linear data area.
824 */
825 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
826 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
827 goto err;
828
829 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
830 fh = (struct fc_frame_header *) skb_transport_header(skb);
831
832 oxid = ntohs(fh->fh_ox_id);
833
834 fr = fcoe_dev_from_skb(skb);
835 fr->fr_dev = lp;
836 fr->ptype = ptype;
837
838#ifdef CONFIG_SMP
839 /*
840 * The incoming frame exchange id(oxid) is ANDed with num of online
841 * cpu bits to get cpu and then this cpu is used for selecting
842 * a per cpu kernel thread from fcoe_percpu.
843 */
844 cpu = oxid & (num_online_cpus() - 1);
845#endif
846
847 fps = &per_cpu(fcoe_percpu, cpu);
848 spin_lock_bh(&fps->fcoe_rx_list.lock);
849 if (unlikely(!fps->thread)) {
850 /*
851 * The targeted CPU is not ready, let's target
852 * the first CPU now. For non-SMP systems this
853 * will check the same CPU twice.
854 */
855 FC_DBG("CPU is online, but no receive thread ready "
856 "for incoming skb- using first online CPU.\n");
857
858 spin_unlock_bh(&fps->fcoe_rx_list.lock);
859 cpu = first_cpu(cpu_online_map);
860 fps = &per_cpu(fcoe_percpu, cpu);
861 spin_lock_bh(&fps->fcoe_rx_list.lock);
862 if (!fps->thread) {
863 spin_unlock_bh(&fps->fcoe_rx_list.lock);
864 goto err;
865 }
866 }
867
868 /*
869 * We now have a valid CPU that we're targeting for
870 * this skb. We also have this receive thread locked,
871 * so we're free to queue skbs into its queue.
872 */
873 __skb_queue_tail(&fps->fcoe_rx_list, skb);
874 if (fps->fcoe_rx_list.qlen == 1)
875 wake_up_process(fps->thread);
876
877 spin_unlock_bh(&fps->fcoe_rx_list.lock);
878
879 return 0;
880err:
881 fc_lport_get_stats(lp)->ErrorFrames++;
882
883err2:
884 kfree_skb(skb);
885 return -1;
886}
887EXPORT_SYMBOL_GPL(fcoe_rcv);
888
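(Editorial aside: the oxid-based CPU selection above only spreads work evenly when the online CPU count is a power of two, because the OX_ID is masked rather than taken modulo the count; the fallback to the first online CPU keeps things correct either way. A small userspace demonstration of the arithmetic, not driver code:)

#include <stdio.h>

int main(void)
{
	unsigned int num_online_cpus = 6;	/* example: not a power of two */
	unsigned short oxid;

	/* same selection as fcoe_rcv(): cpu = oxid & (num_online_cpus - 1) */
	for (oxid = 0x1000; oxid < 0x1008; oxid++)
		printf("oxid 0x%04x -> cpu %u\n", oxid,
		       oxid & (num_online_cpus - 1));

	/* with 6 CPUs the mask is 5 (binary 101), so only CPUs 0, 1, 4 and 5
	 * are ever chosen by this hash */
	return 0;
}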
889/**
890 * fcoe_start_io() - pass to netdev to start xmit for fcoe
891 * @skb: the skb to be xmitted
892 *
893 * Returns: 0 for success
894 */
895static inline int fcoe_start_io(struct sk_buff *skb)
896{
897 int rc;
898
899 skb_get(skb);
900 rc = dev_queue_xmit(skb);
901 if (rc != 0)
902 return rc;
903 kfree_skb(skb);
904 return 0;
905}
906
907/**
908 * fcoe_get_paged_crc_eof() - get a page fragment for the CRC/EOF trailer
909 * @skb: the skb to be xmitted
910 * @tlen: trailer length to append
911 *
912 * Returns: 0 for success
913 */
914static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
915{
916 struct fcoe_percpu_s *fps;
917 struct page *page;
918
919 fps = &get_cpu_var(fcoe_percpu);
920 page = fps->crc_eof_page;
921 if (!page) {
922 page = alloc_page(GFP_ATOMIC);
923 if (!page) {
924 put_cpu_var(fcoe_percpu);
925 return -ENOMEM;
926 }
927 fps->crc_eof_page = page;
928 fps->crc_eof_offset = 0;
929 }
930
931 get_page(page);
932 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
933 fps->crc_eof_offset, tlen);
934 skb->len += tlen;
935 skb->data_len += tlen;
936 skb->truesize += tlen;
937 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
938
939 if (fps->crc_eof_offset >= PAGE_SIZE) {
940 fps->crc_eof_page = NULL;
941 fps->crc_eof_offset = 0;
942 put_page(page);
943 }
944 put_cpu_var(fcoe_percpu);
945 return 0;
946}
947
948/**
949 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
950 * @fp: the fc_frame containing data to be checksummed
951 *
952 * This uses crc32() to calculate the CRC for the FC frame
953 * Returns: the 32-bit CRC
954 */
955u32 fcoe_fc_crc(struct fc_frame *fp)
956{
957 struct sk_buff *skb = fp_skb(fp);
958 struct skb_frag_struct *frag;
959 unsigned char *data;
960 unsigned long off, len, clen;
961 u32 crc;
962 unsigned i;
963
964 crc = crc32(~0, skb->data, skb_headlen(skb));
965
966 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
967 frag = &skb_shinfo(skb)->frags[i];
968 off = frag->page_offset;
969 len = frag->size;
970 while (len > 0) {
971 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
972 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
973 KM_SKB_DATA_SOFTIRQ);
974 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
975 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
976 off += clen;
977 len -= clen;
978 }
979 }
980 return crc;
981}
982
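(Editorial aside: the kernel's crc32() used above is the reflected CRC-32 with polynomial 0xEDB88320, seeded with ~0 and left un-inverted; fcoe_xmit() below stores the bit-inverted result little-endian in the fcoe_crc_eof trailer. A self-contained userspace version of the same arithmetic, offered as an illustration only:)

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* bit-at-a-time reflected CRC-32, equivalent to the kernel's crc32(seed, p, len) */
static uint32_t fc_crc32(uint32_t crc, const unsigned char *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char frame[] = "example FC frame contents";
	uint32_t crc = fc_crc32(~0U, frame, sizeof(frame) - 1);

	/* fcoe_xmit() stores cpu_to_le32(~crc) in the trailer's fcoe_crc32 */
	printf("trailer CRC word: 0x%08x\n", ~crc);
	return 0;
}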
983/**
984 * fcoe_xmit() - FCoE frame transmit function
985 * @lp: the associated local port
986 * @fp: the fc_frame to be transmitted
987 *
988 * Return : 0 for success
989 */
990int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
991{
992 int wlen, rc = 0;
993 u32 crc;
994 struct ethhdr *eh;
995 struct fcoe_crc_eof *cp;
996 struct sk_buff *skb;
997 struct fcoe_dev_stats *stats;
998 struct fc_frame_header *fh;
999 unsigned int hlen; /* header length implies the version */
1000 unsigned int tlen; /* trailer length */
1001 unsigned int elen; /* eth header, may include vlan */
1002 struct fcoe_softc *fc;
1003 u8 sof, eof;
1004 struct fcoe_hdr *hp;
1005
1006 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1007
1008 fc = lport_priv(lp);
1009 fh = fc_frame_header_get(fp);
1010 skb = fp_skb(fp);
1011 wlen = skb->len / FCOE_WORD_TO_BYTE;
1012
1013 if (!lp->link_up) {
1014 kfree_skb(skb);
1015 return 0;
1016 }
1017
1018 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1019 fcoe_ctlr_els_send(&fc->ctlr, skb))
1020 return 0;
1021
1022 sof = fr_sof(fp);
1023 eof = fr_eof(fp);
1024
1025 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
1026 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1027 hlen = sizeof(struct fcoe_hdr);
1028 tlen = sizeof(struct fcoe_crc_eof);
1029 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1030
1031 /* crc offload */
1032 if (likely(lp->crc_offload)) {
1033 skb->ip_summed = CHECKSUM_PARTIAL;
1034 skb->csum_start = skb_headroom(skb);
1035 skb->csum_offset = skb->len;
1036 crc = 0;
1037 } else {
1038 skb->ip_summed = CHECKSUM_NONE;
1039 crc = fcoe_fc_crc(fp);
1040 }
1041
1042 /* copy fc crc and eof to the skb buff */
1043 if (skb_is_nonlinear(skb)) {
1044 skb_frag_t *frag;
1045 if (fcoe_get_paged_crc_eof(skb, tlen)) {
1046 kfree_skb(skb);
1047 return -ENOMEM;
1048 }
1049 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1050 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1051 + frag->page_offset;
1052 } else {
1053 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1054 }
1055
1056 memset(cp, 0, sizeof(*cp));
1057 cp->fcoe_eof = eof;
1058 cp->fcoe_crc32 = cpu_to_le32(~crc);
1059
1060 if (skb_is_nonlinear(skb)) {
1061 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1062 cp = NULL;
1063 }
1064
1065 /* adjust skb network/transport offsets to match mac/fcoe/fc */
1066 skb_push(skb, elen + hlen);
1067 skb_reset_mac_header(skb);
1068 skb_reset_network_header(skb);
1069 skb->mac_len = elen;
1070 skb->protocol = htons(ETH_P_FCOE);
1071 skb->dev = fc->real_dev;
1072
1073 /* fill up mac and fcoe headers */
1074 eh = eth_hdr(skb);
1075 eh->h_proto = htons(ETH_P_FCOE);
1076 if (fc->ctlr.map_dest)
1077 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1078 else
1079 /* insert GW address */
1080 memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN);
1081
1082 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1083 memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN);
1084 else
1085 memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN);
1086
1087 hp = (struct fcoe_hdr *)(eh + 1);
1088 memset(hp, 0, sizeof(*hp));
1089 if (FC_FCOE_VER)
1090 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1091 hp->fcoe_sof = sof;
1092
1093#ifdef NETIF_F_FSO
1094 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1095 if (lp->seq_offload && fr_max_payload(fp)) {
1096 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1097 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1098 } else {
1099 skb_shinfo(skb)->gso_type = 0;
1100 skb_shinfo(skb)->gso_size = 0;
1101 }
1102#endif
1103 /* update tx stats: regardless if LLD fails */
1104 stats = fc_lport_get_stats(lp);
1105 stats->TxFrames++;
1106 stats->TxWords += wlen;
1107
1108 /* send down to lld */
1109 fr_dev(fp) = lp;
1110 if (fc->fcoe_pending_queue.qlen)
1111 rc = fcoe_check_wait_queue(lp);
1112
1113 if (rc == 0)
1114 rc = fcoe_start_io(skb);
1115
1116 if (rc) {
1117 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1118 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1119 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1120 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1121 lp->qfull = 1;
1122 }
1123
1124 return 0;
1125}
1126EXPORT_SYMBOL_GPL(fcoe_xmit);
1127
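(Editorial aside: the elen/hlen/tlen arithmetic above corresponds to the encapsulation Ethernet(/VLAN) header + 14-byte FCoE header + FC frame + 8-byte CRC/EOF trailer, and wlen counts the FC frame plus CRC in 32-bit words. The structures below are assumed mirrors of include/scsi/fc/fc_fcoe.h, suffixed _sketch to make clear they are illustrations rather than the driver's own definitions:)

#include <stdio.h>

struct fcoe_hdr_sketch {
	unsigned char fcoe_ver;		/* version in the upper 4 bits */
	unsigned char fcoe_resvd[12];	/* reserved, sent as zero */
	unsigned char fcoe_sof;		/* start-of-frame code */
};					/* 14 bytes -> hlen above */

struct fcoe_crc_eof_sketch {
	unsigned int fcoe_crc32;	/* FC CRC, stored little-endian */
	unsigned char fcoe_eof;		/* end-of-frame code */
	unsigned char fcoe_resvd[3];	/* reserved, sent as zero */
};					/* 8 bytes -> tlen above */

int main(void)
{
	printf("hlen %zu, tlen %zu\n", sizeof(struct fcoe_hdr_sketch),
	       sizeof(struct fcoe_crc_eof_sketch));
	return 0;
}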
1128/**
1129 * fcoe_percpu_receive_thread() - recv thread per cpu
1130 * @arg: ptr to the fcoe per cpu struct
1131 *
1132 * Return: 0 for success
1133 */
1134int fcoe_percpu_receive_thread(void *arg)
1135{
1136 struct fcoe_percpu_s *p = arg;
1137 u32 fr_len;
1138 struct fc_lport *lp;
1139 struct fcoe_rcv_info *fr;
1140 struct fcoe_dev_stats *stats;
1141 struct fc_frame_header *fh;
1142 struct sk_buff *skb;
1143 struct fcoe_crc_eof crc_eof;
1144 struct fc_frame *fp;
1145 u8 *mac = NULL;
1146 struct fcoe_softc *fc;
1147 struct fcoe_hdr *hp;
1148
1149 set_user_nice(current, -20);
1150
1151 while (!kthread_should_stop()) {
1152
1153 spin_lock_bh(&p->fcoe_rx_list.lock);
1154 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1155 set_current_state(TASK_INTERRUPTIBLE);
1156 spin_unlock_bh(&p->fcoe_rx_list.lock);
1157 schedule();
1158 set_current_state(TASK_RUNNING);
1159 if (kthread_should_stop())
1160 return 0;
1161 spin_lock_bh(&p->fcoe_rx_list.lock);
1162 }
1163 spin_unlock_bh(&p->fcoe_rx_list.lock);
1164 fr = fcoe_dev_from_skb(skb);
1165 lp = fr->fr_dev;
1166 if (unlikely(lp == NULL)) {
1167 FC_DBG("invalid HBA Structure");
1168 kfree_skb(skb);
1169 continue;
1170 }
1171
1172 if (unlikely(debug_fcoe)) {
1173 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
1174 "tail:%p end:%p sum:%d dev:%s",
1175 skb->len, skb->data_len,
1176 skb->head, skb->data, skb_tail_pointer(skb),
1177 skb_end_pointer(skb), skb->csum,
1178 skb->dev ? skb->dev->name : "<NULL>");
1179 }
1180
1181 /*
1182 * Save source MAC address before discarding header.
1183 */
1184 fc = lport_priv(lp);
1185 if (skb_is_nonlinear(skb))
1186 skb_linearize(skb); /* not ideal */
1187 mac = eth_hdr(skb)->h_source;
1188
1189 /*
1190 * Frame length checks and setting up the header pointers
1191 * was done in fcoe_rcv already.
1192 */
1193 hp = (struct fcoe_hdr *) skb_network_header(skb);
1194 fh = (struct fc_frame_header *) skb_transport_header(skb);
1195
1196 stats = fc_lport_get_stats(lp);
1197 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1198 if (stats->ErrorFrames < 5)
1199 printk(KERN_WARNING "FCoE version "
1200 "mismatch: The frame has "
1201 "version %x, but the "
1202 "initiator supports version "
1203 "%x\n", FC_FCOE_DECAPS_VER(hp),
1204 FC_FCOE_VER);
1205 stats->ErrorFrames++;
1206 kfree_skb(skb);
1207 continue;
1208 }
1209
1210 skb_pull(skb, sizeof(struct fcoe_hdr));
1211 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1212
1213 stats->RxFrames++;
1214 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1215
1216 fp = (struct fc_frame *)skb;
1217 fc_frame_init(fp);
1218 fr_dev(fp) = lp;
1219 fr_sof(fp) = hp->fcoe_sof;
1220
1221 /* Copy out the CRC and EOF trailer for access */
1222 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1223 kfree_skb(skb);
1224 continue;
1225 }
1226 fr_eof(fp) = crc_eof.fcoe_eof;
1227 fr_crc(fp) = crc_eof.fcoe_crc32;
1228 if (pskb_trim(skb, fr_len)) {
1229 kfree_skb(skb);
1230 continue;
1231 }
1232
1233 /*
1234 * We only check the CRC here if no offload has verified it and the
1235 * frame is not solicited data; for solicited data the FCP layer
1236 * checks it during the copy.
1237 */
1238 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1239 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1240 else
1241 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1242
1243 fh = fc_frame_header_get(fp);
1244 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1245 fh->fh_type == FC_TYPE_FCP) {
1246 fc_exch_recv(lp, lp->emp, fp);
1247 continue;
1248 }
1249 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1250 if (le32_to_cpu(fr_crc(fp)) !=
1251 ~crc32(~0, skb->data, fr_len)) {
1252 if (debug_fcoe || stats->InvalidCRCCount < 5)
1253 printk(KERN_WARNING "fcoe: dropping "
1254 "frame with CRC error\n");
1255 stats->InvalidCRCCount++;
1256 stats->ErrorFrames++;
1257 fc_frame_free(fp);
1258 continue;
1259 }
1260 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1261 }
1262 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1263 fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) {
1264 fc_frame_free(fp);
1265 continue;
1266 }
1267 fc_exch_recv(lp, lp->emp, fp);
1268 }
1269 return 0;
1270}
1271
1272/**
1273 * fcoe_watchdog() - fcoe timer callback
1274 * @vp: unused timer argument
1275 *
1276 * This checks the pending queue length for fcoe and set lport qfull
1277 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
1278 * fcoe_hostlist.
1279 *
1280 * Returns: none
1281 */
1282void fcoe_watchdog(ulong vp)
1283{
1284 struct fcoe_softc *fc;
1285
1286 read_lock(&fcoe_hostlist_lock);
1287 list_for_each_entry(fc, &fcoe_hostlist, list) {
1288 if (fc->ctlr.lp)
1289 fcoe_check_wait_queue(fc->ctlr.lp);
1290 }
1291 read_unlock(&fcoe_hostlist_lock);
1292
1293 fcoe_timer.expires = jiffies + (1 * HZ);
1294 add_timer(&fcoe_timer);
1295}
1296
1297
1298/**
1299 * fcoe_check_wait_queue() - try to drain the fcoe pending xmit queue
1300 * @lp: the fc_lport whose pending queue is to be drained
1301 *
1302 * This dequeues skbs from the head of the pending queue and hands each
1303 * one to fcoe_start_io(). If a transmit fails, the skb is put back at
1304 * the head of the queue and draining stops so it can be retried later.
1305 *
1306 * The pending queue is filled when an skb transmit fails; it is emptied
1307 * either by the fcoe timer or by the next transmit attempt. Once the
1308 * queue drops below the low-water mark, lp->qfull is cleared.
1309 *
1310 * Returns: the remaining pending-queue length, or -1 if another drain
1311 * is already in progress
1313 */
1314static int fcoe_check_wait_queue(struct fc_lport *lp)
1315{
1316 struct fcoe_softc *fc = lport_priv(lp);
1317 struct sk_buff *skb;
1318 int rc = -1;
1319
1320 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1321 if (fc->fcoe_pending_queue_active)
1322 goto out;
1323 fc->fcoe_pending_queue_active = 1;
1324
1325 while (fc->fcoe_pending_queue.qlen) {
1326 /* keep qlen > 0 until fcoe_start_io succeeds */
1327 fc->fcoe_pending_queue.qlen++;
1328 skb = __skb_dequeue(&fc->fcoe_pending_queue);
1329
1330 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1331 rc = fcoe_start_io(skb);
1332 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1333
1334 if (rc) {
1335 __skb_queue_head(&fc->fcoe_pending_queue, skb);
1336 /* undo temporary increment above */
1337 fc->fcoe_pending_queue.qlen--;
1338 break;
1339 }
1340 /* undo temporary increment above */
1341 fc->fcoe_pending_queue.qlen--;
1342 }
1343
1344 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1345 lp->qfull = 0;
1346 fc->fcoe_pending_queue_active = 0;
1347 rc = fc->fcoe_pending_queue.qlen;
1348out:
1349 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1350 return rc;
1351}
1352
1353/**
1354 * fcoe_dev_setup() - setup link change notification interface
1355 */
1356static void fcoe_dev_setup(void)
1357{
1358 /*
1359 * register a netdev notifier so we are told about link
1360 * state changes
1361 */
1362 register_netdevice_notifier(&fcoe_notifier);
1363}
1364
1365/**
1366 * fcoe_dev_cleanup() - cleanup link change notification interface
1367 */
1368static void fcoe_dev_cleanup(void)
1369{
1370 unregister_netdevice_notifier(&fcoe_notifier);
1371}
1372
1373/**
1374 * fcoe_device_notification() - netdev event notification callback
1375 * @notifier: context of the notification
1376 * @event: type of event
1377 * @ptr: the net_device the event applies to
1378 *
1379 * This function is called by the ethernet driver in case of link change event
1380 *
1381 * Returns: NOTIFY_OK, or NOTIFY_DONE if the device is not an fcoe port
1382 */
1383static int fcoe_device_notification(struct notifier_block *notifier,
1384 ulong event, void *ptr)
1385{
1386 struct fc_lport *lp = NULL;
1387 struct net_device *real_dev = ptr;
1388 struct fcoe_softc *fc;
1389 struct fcoe_dev_stats *stats;
1390 u32 link_possible = 1;
1391 u32 mfs;
1392 int rc = NOTIFY_OK;
1393
1394 read_lock(&fcoe_hostlist_lock);
1395 list_for_each_entry(fc, &fcoe_hostlist, list) {
1396 if (fc->real_dev == real_dev) {
1397 lp = fc->ctlr.lp;
1398 break;
1399 }
1400 }
1401 read_unlock(&fcoe_hostlist_lock);
1402 if (lp == NULL) {
1403 rc = NOTIFY_DONE;
1404 goto out;
1405 }
1406
1407 switch (event) {
1408 case NETDEV_DOWN:
1409 case NETDEV_GOING_DOWN:
1410 link_possible = 0;
1411 break;
1412 case NETDEV_UP:
1413 case NETDEV_CHANGE:
1414 break;
1415 case NETDEV_CHANGEMTU:
1416 mfs = fc->real_dev->mtu -
1417 (sizeof(struct fcoe_hdr) +
1418 sizeof(struct fcoe_crc_eof));
1419 if (mfs >= FC_MIN_MAX_FRAME)
1420 fc_set_mfs(lp, mfs);
1421 break;
1422 case NETDEV_REGISTER:
1423 break;
1424 default:
1425 FC_DBG("Unknown event %ld from netdev netlink\n", event);
1426 }
1427 if (link_possible && !fcoe_link_ok(lp))
1428 fcoe_ctlr_link_up(&fc->ctlr);
1429 else if (fcoe_ctlr_link_down(&fc->ctlr)) {
1430 stats = fc_lport_get_stats(lp);
1431 stats->LinkFailureCount++;
1432 fcoe_clean_pending_queue(lp);
1433 }
1434out:
1435 return rc;
1436}
1437
1438/**
1439 * fcoe_if_to_netdev() - parse a name buffer to get netdev
1440 * @buffer: incoming buffer holding the interface name
1441 *
1442 * Returns: NULL or a ptr to the net_device
1444 */
1445static struct net_device *fcoe_if_to_netdev(const char *buffer)
1446{
1447 char *cp;
1448 char ifname[IFNAMSIZ + 2];
1449
1450 if (buffer) {
1451 strlcpy(ifname, buffer, IFNAMSIZ);
1452 cp = ifname + strlen(ifname);
1453 while (--cp >= ifname && *cp == '\n')
1454 *cp = '\0';
1455 return dev_get_by_name(&init_net, ifname);
1456 }
1457 return NULL;
1458}
1459
1460/**
1461 * fcoe_netdev_to_module_owner() - finds the NIC driver module of the netdev
1462 * @netdev: the target netdev
1463 *
1464 * Returns: ptr to the struct module, NULL for failure
1465 */
1466static struct module *
1467fcoe_netdev_to_module_owner(const struct net_device *netdev)
1468{
1469 struct device *dev;
1470
1471 if (!netdev)
1472 return NULL;
1473
1474 dev = netdev->dev.parent;
1475 if (!dev)
1476 return NULL;
1477
1478 if (!dev->driver)
1479 return NULL;
1480
1481 return dev->driver->owner;
1482}
1483
1484/**
1485 * fcoe_ethdrv_get() - Hold the Ethernet driver
1486 * @netdev: the target netdev
1487 *
1488 * Holds the Ethernet driver module by try_module_get() for
1489 * the corresponding netdev.
1490 *
1491 * Returns: the try_module_get() result, or -ENODEV if there is no owner
1492 */
1493static int fcoe_ethdrv_get(const struct net_device *netdev)
1494{
1495 struct module *owner;
1496
1497 owner = fcoe_netdev_to_module_owner(netdev);
1498 if (owner) {
1499 printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
1500 module_name(owner), netdev->name);
1501 return try_module_get(owner);
1502 }
1503 return -ENODEV;
1504}
1505
1506/**
1507 * fcoe_ethdrv_put() - Release the Ethernet driver
1508 * @netdev: the target netdev
1509 *
1510 * Releases the Ethernet driver module by module_put for
1511 * the corresponding netdev.
1512 *
1513 * Returns: 0 for success
1514 */
1515static int fcoe_ethdrv_put(const struct net_device *netdev)
1516{
1517 struct module *owner;
1518
1519 owner = fcoe_netdev_to_module_owner(netdev);
1520 if (owner) {
1521 printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
1522 module_name(owner), netdev->name);
1523 module_put(owner);
1524 return 0;
1525 }
1526 return -ENODEV;
1527}
1528
1529/**
1530 * fcoe_destroy() - handles the destroy from sysfs
1531 * @buffer: expected to be an eth interface name
1532 * @kp: associated kernel param
1533 *
1534 * Returns: 0 for success
1535 */
1536static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1537{
1538 int rc;
1539 struct net_device *netdev;
1540
1541 netdev = fcoe_if_to_netdev(buffer);
1542 if (!netdev) {
1543 rc = -ENODEV;
1544 goto out_nodev;
1545 }
1546 /* look for existing lport */
1547 if (!fcoe_hostlist_lookup(netdev)) {
1548 rc = -ENODEV;
1549 goto out_putdev;
1550 }
1551 rc = fcoe_if_destroy(netdev);
1552 if (rc) {
1553 printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n",
1554 netdev->name);
1555 rc = -EIO;
1556 goto out_putdev;
1557 }
1558 fcoe_ethdrv_put(netdev);
1559 rc = 0;
1560out_putdev:
1561 dev_put(netdev);
1562out_nodev:
1563 return rc;
1564}
1565
1566/**
1567 * fcoe_create() - Handles the create call from sysfs
1568 * @buffer: expected to be an eth interface name
1569 * @kp: associated kernel param
1570 *
1571 * Returns: 0 for success
1572 */
1573static int fcoe_create(const char *buffer, struct kernel_param *kp)
1574{
1575 int rc;
1576 struct net_device *netdev;
1577
1578 netdev = fcoe_if_to_netdev(buffer);
1579 if (!netdev) {
1580 rc = -ENODEV;
1581 goto out_nodev;
1582 }
1583 /* look for existing lport */
1584 if (fcoe_hostlist_lookup(netdev)) {
1585 rc = -EEXIST;
1586 goto out_putdev;
1587 }
1588 fcoe_ethdrv_get(netdev);
1589
1590 rc = fcoe_if_create(netdev);
1591 if (rc) {
1592 printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n",
1593 netdev->name);
1594 fcoe_ethdrv_put(netdev);
1595 rc = -EIO;
1596 goto out_putdev;
1597 }
1598 rc = 0;
1599out_putdev:
1600 dev_put(netdev);
1601out_nodev:
1602 return rc;
1603}
1604
1605module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1606__MODULE_PARM_TYPE(create, "string");
1607MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1608module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1609__MODULE_PARM_TYPE(destroy, "string");
1610MODULE_PARM_DESC(destroy, "Destroy fcoe port");
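
Because "create" and "destroy" are writable module parameters, instances are managed by writing an interface name to the corresponding parameter file. A hedged userspace sketch follows; the sysfs path assumes the standard /sys/module/<module>/parameters/ layout and is not shown in this hunk.

/*
 * Illustrative sketch, not part of the patch: trigger fcoe_create()
 * by writing an interface name to the "create" module parameter.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/fcoe/parameters/create", "w");

	if (!f) {
		perror("fcoe create parameter");
		return 1;
	}
	fputs("eth0\n", f);	/* fcoe_if_to_netdev() strips the trailing newline */
	fclose(f);
	return 0;
}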
1611
1612/**
1613 * fcoe_link_ok() - Check if link is ok for the fc_lport
1614 * @lp: ptr to the fc_lport
1615 *
1616 * Any permanently-disqualifying conditions have been previously checked.
1617 * This also updates the speed setting, which may change with link for 100/1000.
1618 *
1619 * This function should probably be checking for PAUSE support at some point
1620 * in the future. Currently Per-priority-pause is not determinable using
1621 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1622 *
1623 * Returns: 0 if link is OK for use by FCoE.
1624 *
1625 */
1626int fcoe_link_ok(struct fc_lport *lp)
1627{
1628 struct fcoe_softc *fc = lport_priv(lp);
1629 struct net_device *dev = fc->real_dev;
1630 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1631 int rc = 0;
1632
1633 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1634 dev = fc->phys_dev;
1635 if (dev->ethtool_ops->get_settings) {
1636 dev->ethtool_ops->get_settings(dev, &ecmd);
1637 lp->link_supported_speeds &=
1638 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1639 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1640 SUPPORTED_1000baseT_Full))
1641 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1642 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1643 lp->link_supported_speeds |=
1644 FC_PORTSPEED_10GBIT;
1645 if (ecmd.speed == SPEED_1000)
1646 lp->link_speed = FC_PORTSPEED_1GBIT;
1647 if (ecmd.speed == SPEED_10000)
1648 lp->link_speed = FC_PORTSPEED_10GBIT;
1649 }
1650 } else
1651 rc = -1;
1652
1653 return rc;
1654}
1655EXPORT_SYMBOL_GPL(fcoe_link_ok);
1656
1657/**
1658 * fcoe_percpu_clean() - Clear the pending skbs for an lport
1659 * @lp: the fc_lport
1660 */
1661void fcoe_percpu_clean(struct fc_lport *lp)
1662{
1663 struct fcoe_percpu_s *pp;
1664 struct fcoe_rcv_info *fr;
1665 struct sk_buff_head *list;
1666 struct sk_buff *skb, *next;
1667 struct sk_buff *head;
1668 unsigned int cpu;
1669
1670 for_each_possible_cpu(cpu) {
1671 pp = &per_cpu(fcoe_percpu, cpu);
1672 spin_lock_bh(&pp->fcoe_rx_list.lock);
1673 list = &pp->fcoe_rx_list;
1674 head = list->next;
1675 for (skb = head; skb != (struct sk_buff *)list;
1676 skb = next) {
1677 next = skb->next;
1678 fr = fcoe_dev_from_skb(skb);
1679 if (fr->fr_dev == lp) {
1680 __skb_unlink(skb, list);
1681 kfree_skb(skb);
1682 }
1683 }
1684 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1685 }
1686}
1687EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
1688
1689/**
1690 * fcoe_clean_pending_queue() - Dequeue and free all skbs pending on the lport
1691 * @lp: the corresponding fc_lport
1692 *
1693 * Returns: none
1694 */
1695void fcoe_clean_pending_queue(struct fc_lport *lp)
1696{
1697 struct fcoe_softc *fc = lport_priv(lp);
1698 struct sk_buff *skb;
1699
1700 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1701 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1702 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1703 kfree_skb(skb);
1704 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1705 }
1706 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1707}
1708EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1709
1710/**
1711 * fcoe_reset() - Resets the lport associated with the given scsi host
1712 * @shost: shost the reset is from
1713 *
1714 * Returns: always 0
1715 */
1716int fcoe_reset(struct Scsi_Host *shost)
1717{
1718 struct fc_lport *lport = shost_priv(shost);
1719 fc_lport_reset(lport);
1720 return 0;
1721}
1722EXPORT_SYMBOL_GPL(fcoe_reset);
1723
1724/**
1725 * fcoe_hostlist_lookup_softc() - find the corresponding fcoe_softc for a given device
1726 * @device: this is currently ptr to net_device
1727 *
1728 * Returns: NULL or the located fcoe_softc
1729 */
1730static struct fcoe_softc *
1731fcoe_hostlist_lookup_softc(const struct net_device *dev)
1732{
1733 struct fcoe_softc *fc;
1734
1735 read_lock(&fcoe_hostlist_lock);
1736 list_for_each_entry(fc, &fcoe_hostlist, list) {
1737 if (fc->real_dev == dev) {
1738 read_unlock(&fcoe_hostlist_lock);
1739 return fc;
1740 }
1741 }
1742 read_unlock(&fcoe_hostlist_lock);
1743 return NULL;
1744}
1745
1746/**
1747 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1748 * @netdev: ptr to net_device
1749 *
1750 * Returns: NULL or the located fc_lport
1751 */
1752struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1753{
1754 struct fcoe_softc *fc;
1755
1756 fc = fcoe_hostlist_lookup_softc(netdev);
1757
1758 return (fc) ? fc->ctlr.lp : NULL;
1759}
1760EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
1761
1762/**
1763 * fcoe_hostlist_add() - Add a lport to lports list
1764 * @lp: ptr to the fc_lport to be added
1765 *
1766 * Returns: 0 for success
1767 */
1768int fcoe_hostlist_add(const struct fc_lport *lp)
1769{
1770 struct fcoe_softc *fc;
1771
1772 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1773 if (!fc) {
1774 fc = lport_priv(lp);
1775 write_lock_bh(&fcoe_hostlist_lock);
1776 list_add_tail(&fc->list, &fcoe_hostlist);
1777 write_unlock_bh(&fcoe_hostlist_lock);
1778 }
1779 return 0;
1780}
1781EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
1782
1783/**
1784 * fcoe_hostlist_remove() - remove a lport from lports list
1785 * @lp: ptr to the fc_lport to be removed
1786 *
1787 * Returns: 0 for success
1788 */
1789int fcoe_hostlist_remove(const struct fc_lport *lp)
1790{
1791 struct fcoe_softc *fc;
1792
1793 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1794 BUG_ON(!fc);
1795 write_lock_bh(&fcoe_hostlist_lock);
1796 list_del(&fc->list);
1797 write_unlock_bh(&fcoe_hostlist_lock);
1798
1799 return 0;
1800}
1801EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
1802
1803/**
1804 * fcoe_init() - fcoe module loading initialization
1805 *
1806 * Returns 0 on success, negative on failure
1807 */
1808static int __init fcoe_init(void)
1809{
1810 unsigned int cpu;
1811 int rc = 0;
1812 struct fcoe_percpu_s *p;
1813
1814 INIT_LIST_HEAD(&fcoe_hostlist);
1815 rwlock_init(&fcoe_hostlist_lock);
1816
1817 for_each_possible_cpu(cpu) {
1818 p = &per_cpu(fcoe_percpu, cpu);
1819 skb_queue_head_init(&p->fcoe_rx_list);
1820 }
1821
1822 for_each_online_cpu(cpu)
1823 fcoe_percpu_thread_create(cpu);
1824
1825 /* Register the CPU hotplug notifier */
1826 rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1827 if (rc)
1828 goto out_free;
1829
1830 /* Setup link change notification */
1831 fcoe_dev_setup();
1832
1833 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1834
1835 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1836
1837 fcoe_if_init();
1838
1839 return 0;
1840
1841out_free:
1842 for_each_online_cpu(cpu) {
1843 fcoe_percpu_thread_destroy(cpu);
1844 }
1845
1846 return rc;
1847}
1848module_init(fcoe_init);
1849
1850/**
1851 * fcoe_exit() - fcoe module unloading cleanup
1852 *
1853 * Returns: none
1854 */
1855static void __exit fcoe_exit(void)
1856{
1857 unsigned int cpu;
1858 struct fcoe_softc *fc, *tmp;
1859
1860 fcoe_dev_cleanup();
1861
1862 /* Stop the timer */
1863 del_timer_sync(&fcoe_timer);
1864
1865 /* releases the associated fcoe hosts */
1866 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1867 fcoe_if_destroy(fc->real_dev);
1868
1869 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
1870
1871 for_each_online_cpu(cpu) {
1872 fcoe_percpu_thread_destroy(cpu);
1873 }
1874
1875 /* detach from scsi transport */
1876 fcoe_if_exit();
1877}
1878module_exit(fcoe_exit);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
new file mode 100644
index 000000000000..917aae886897
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright(c) 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#ifndef _FCOE_H_
21#define _FCOE_H_
22
23#include <linux/skbuff.h>
24#include <linux/kthread.h>
25
26#define FCOE_MAX_QUEUE_DEPTH 256
27#define FCOE_LOW_QUEUE_DEPTH 32
28
29#define FCOE_WORD_TO_BYTE 4
30
31#define FCOE_VERSION "0.1"
32#define FCOE_NAME "fcoe"
33#define FCOE_VENDOR "Open-FCoE.org"
34
35#define FCOE_MAX_LUN 255
36#define FCOE_MAX_FCP_TARGET 256
37
38#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
39
40#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
41#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
42
43/*
44 * Per-CPU context for the fcoe receive thread
45 */
46struct fcoe_percpu_s {
47 struct task_struct *thread;
48 struct sk_buff_head fcoe_rx_list;
49 struct page *crc_eof_page;
50 int crc_eof_offset;
51};
52
53/*
54 * the fcoe sw transport private data
55 */
56struct fcoe_softc {
57 struct list_head list;
58 struct net_device *real_dev;
59 struct net_device *phys_dev; /* device with ethtool_ops */
60 struct packet_type fcoe_packet_type;
61 struct packet_type fip_packet_type;
62 struct sk_buff_head fcoe_pending_queue;
63 u8 fcoe_pending_queue_active;
64 struct fcoe_ctlr ctlr;
65};
66
67#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr)
68
69static inline struct net_device *fcoe_netdev(
70 const struct fc_lport *lp)
71{
72 return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
73}
74
75#endif /* _FCOE_H_ */
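
Aside, not part of the patch: fcoe_from_ctlr() above is a container_of() wrapper that recovers the enclosing fcoe_softc from a pointer to its embedded fcoe_ctlr member. A minimal, self-contained sketch of that arithmetic; the stub structures and names here are stand-ins, only the pointer math mirrors the header.

/*
 * Illustrative sketch: recover the container from an embedded member,
 * as fcoe_from_ctlr() does for struct fcoe_softc / struct fcoe_ctlr.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fcoe_ctlr_stub { int state; };

struct fcoe_softc_stub {
	int id;
	struct fcoe_ctlr_stub ctlr;	/* embedded, as in struct fcoe_softc */
};

#define fcoe_from_ctlr_stub(fip) \
	container_of(fip, struct fcoe_softc_stub, ctlr)

int main(void)
{
	struct fcoe_softc_stub fc = { .id = 42 };
	struct fcoe_ctlr_stub *fip = &fc.ctlr;

	/* libfcoe callbacks see only "fip"; the driver maps it back to its softc */
	printf("softc id = %d\n", fcoe_from_ctlr_stub(fip)->id);	/* 42 */
	return 0;
}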
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
deleted file mode 100644
index 2bbbe3c0cc7b..000000000000
--- a/drivers/scsi/fcoe/fcoe_sw.c
+++ /dev/null
@@ -1,561 +0,0 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/if_vlan.h>
29#include <net/rtnetlink.h>
30
31#include <scsi/fc/fc_els.h>
32#include <scsi/fc/fc_encaps.h>
33#include <scsi/fc/fc_fs.h>
34#include <scsi/scsi_transport.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include <scsi/libfc.h>
38#include <scsi/libfcoe.h>
39#include <scsi/fc_transport_fcoe.h>
40
41#define FCOE_SW_VERSION "0.1"
42#define FCOE_SW_NAME "fcoesw"
43#define FCOE_SW_VENDOR "Open-FCoE.org"
44
45#define FCOE_MAX_LUN 255
46#define FCOE_MAX_FCP_TARGET 256
47
48#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
49
50#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
51#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
52
53static struct scsi_transport_template *scsi_transport_fcoe_sw;
54
55struct fc_function_template fcoe_sw_transport_function = {
56 .show_host_node_name = 1,
57 .show_host_port_name = 1,
58 .show_host_supported_classes = 1,
59 .show_host_supported_fc4s = 1,
60 .show_host_active_fc4s = 1,
61 .show_host_maxframe_size = 1,
62
63 .show_host_port_id = 1,
64 .show_host_supported_speeds = 1,
65 .get_host_speed = fc_get_host_speed,
66 .show_host_speed = 1,
67 .show_host_port_type = 1,
68 .get_host_port_state = fc_get_host_port_state,
69 .show_host_port_state = 1,
70 .show_host_symbolic_name = 1,
71
72 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
73 .show_rport_maxframe_size = 1,
74 .show_rport_supported_classes = 1,
75
76 .show_host_fabric_name = 1,
77 .show_starget_node_name = 1,
78 .show_starget_port_name = 1,
79 .show_starget_port_id = 1,
80 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
81 .show_rport_dev_loss_tmo = 1,
82 .get_fc_host_stats = fc_get_host_stats,
83 .issue_fc_host_lip = fcoe_reset,
84
85 .terminate_rport_io = fc_rport_terminate_io,
86};
87
88static struct scsi_host_template fcoe_sw_shost_template = {
89 .module = THIS_MODULE,
90 .name = "FCoE Driver",
91 .proc_name = FCOE_SW_NAME,
92 .queuecommand = fc_queuecommand,
93 .eh_abort_handler = fc_eh_abort,
94 .eh_device_reset_handler = fc_eh_device_reset,
95 .eh_host_reset_handler = fc_eh_host_reset,
96 .slave_alloc = fc_slave_alloc,
97 .change_queue_depth = fc_change_queue_depth,
98 .change_queue_type = fc_change_queue_type,
99 .this_id = -1,
100 .cmd_per_lun = 32,
101 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
102 .use_clustering = ENABLE_CLUSTERING,
103 .sg_tablesize = SG_ALL,
104 .max_sectors = 0xffff,
105};
106
107/**
108 * fcoe_sw_lport_config() - sets up the fc_lport
109 * @lp: ptr to the fc_lport to be configured
111 *
112 * Returns: 0 for success
113 */
114static int fcoe_sw_lport_config(struct fc_lport *lp)
115{
116 int i = 0;
117
118 lp->link_up = 0;
119 lp->qfull = 0;
120 lp->max_retry_count = 3;
121 lp->e_d_tov = 2 * 1000; /* FC-FS default */
122 lp->r_a_tov = 2 * 2 * 1000;
123 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
124 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
125
126 /*
127 * allocate per cpu stats block
128 */
129 for_each_online_cpu(i)
130 lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
131 GFP_KERNEL);
132
133 /* lport fc_lport related configuration */
134 fc_lport_config(lp);
135
136 /* offload related configuration */
137 lp->crc_offload = 0;
138 lp->seq_offload = 0;
139 lp->lro_enabled = 0;
140 lp->lro_xid = 0;
141 lp->lso_max = 0;
142
143 return 0;
144}
145
146/**
147 * fcoe_sw_netdev_config() - Set up netdev for SW FCoE
148 * @lp : ptr to the fc_lport
149 * @netdev : ptr to the associated netdevice struct
150 *
151 * Must be called after fcoe_sw_lport_config() as it will use lport mutex
152 *
153 * Returns : 0 for success
154 */
155static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
156{
157 u32 mfs;
158 u64 wwnn, wwpn;
159 struct fcoe_softc *fc;
160 u8 flogi_maddr[ETH_ALEN];
161
162 /* Setup lport private data to point to fcoe softc */
163 fc = lport_priv(lp);
164 fc->lp = lp;
165 fc->real_dev = netdev;
166 fc->phys_dev = netdev;
167
168 /* Require support for get_pauseparam ethtool op. */
169 if (netdev->priv_flags & IFF_802_1Q_VLAN)
170 fc->phys_dev = vlan_dev_real_dev(netdev);
171
172 /* Bonding devices are not supported */
173 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
174 (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
175 (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
176 return -EOPNOTSUPP;
177 }
178
179 /*
180 * Determine max frame size based on underlying device and optional
181 * user-configured limit. If the MFS is too low, fcoe_link_ok()
182 * will return 0, so do this first.
183 */
184 mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
185 sizeof(struct fcoe_crc_eof));
186 if (fc_set_mfs(lp, mfs))
187 return -EINVAL;
188
189 if (!fcoe_link_ok(lp))
190 lp->link_up = 1;
191
192 /* offload features support */
193 if (fc->real_dev->features & NETIF_F_SG)
194 lp->sg_supp = 1;
195
196#ifdef NETIF_F_FCOE_CRC
197 if (netdev->features & NETIF_F_FCOE_CRC) {
198 lp->crc_offload = 1;
199 printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
200 netdev->name);
201 }
202#endif
203#ifdef NETIF_F_FSO
204 if (netdev->features & NETIF_F_FSO) {
205 lp->seq_offload = 1;
206 lp->lso_max = netdev->gso_max_size;
207 printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
208 netdev->name, lp->lso_max);
209 }
210#endif
211 if (netdev->fcoe_ddp_xid) {
212 lp->lro_enabled = 1;
213 lp->lro_xid = netdev->fcoe_ddp_xid;
214 printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
215 netdev->name, lp->lro_xid);
216 }
217 skb_queue_head_init(&fc->fcoe_pending_queue);
218 fc->fcoe_pending_queue_active = 0;
219
220 /* setup Source Mac Address */
221 memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
222 fc->real_dev->addr_len);
223
224 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
225 fc_set_wwnn(lp, wwnn);
226 /* XXX - 3rd arg needs to be vlan id */
227 wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
228 fc_set_wwpn(lp, wwpn);
229
230 /*
231 * Add FCoE MAC address as second unicast MAC address
232 * or enter promiscuous mode if not capable of listening
233 * for multiple unicast MACs.
234 */
235 rtnl_lock();
236 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
237 dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
238 rtnl_unlock();
239
240 /*
241 * setup the receive function from ethernet driver
242 * on the ethertype for the given device
243 */
244 fc->fcoe_packet_type.func = fcoe_rcv;
245 fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
246 fc->fcoe_packet_type.dev = fc->real_dev;
247 dev_add_pack(&fc->fcoe_packet_type);
248
249 return 0;
250}
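
Aside, not part of the patch: fcoe_sw_netdev_config() above derives the WWNN and WWPN from the real device's MAC via fcoe_wwn_from_mac(mac, 1, 0) and fcoe_wwn_from_mac(mac, 2, 0). The sketch below shows one plausible NAA-style packing (scheme nibble, optional port qualifier, 48-bit MAC); the exact bit layout produced by libfcoe is an assumption for illustration and is not taken from this hunk.

/*
 * Illustrative sketch: build a WWN-like 64-bit value from a MAC address.
 * The layout (scheme << 60, port << 48, MAC in the low 48 bits) is an
 * assumption, not a statement of fcoe_wwn_from_mac()'s implementation.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t wwn_from_mac_sketch(const uint8_t mac[6],
				    unsigned int scheme, unsigned int port)
{
	uint64_t wwn = 0;
	int i;

	for (i = 0; i < 6; i++)			/* 48-bit MAC in the low bits */
		wwn = (wwn << 8) | mac[i];
	wwn |= (uint64_t)scheme << 60;		/* NAA-style scheme nibble */
	if (scheme == 2)
		wwn |= (uint64_t)(port & 0xfff) << 48;	/* port qualifier */
	return wwn;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

	printf("wwnn 0x%016llx\n",
	       (unsigned long long)wwn_from_mac_sketch(mac, 1, 0));
	printf("wwpn 0x%016llx\n",
	       (unsigned long long)wwn_from_mac_sketch(mac, 2, 0));
	return 0;
}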
251
252/**
253 * fcoe_sw_shost_config() - Sets up fc_lport->host
254 * @lp : ptr to the fc_lport
255 * @shost : ptr to the associated scsi host
256 * @dev : device associated to scsi host
257 *
258 * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
259 *
260 * Returns : 0 for success
261 */
262static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
263 struct device *dev)
264{
265 int rc = 0;
266
267 /* lport scsi host config */
268 lp->host = shost;
269
270 lp->host->max_lun = FCOE_MAX_LUN;
271 lp->host->max_id = FCOE_MAX_FCP_TARGET;
272 lp->host->max_channel = 0;
273 lp->host->transportt = scsi_transport_fcoe_sw;
274
275 /* add the new host to the SCSI-ml */
276 rc = scsi_add_host(lp->host, dev);
277 if (rc) {
278 FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
279 return rc;
280 }
281 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
282 FCOE_SW_NAME, FCOE_SW_VERSION,
283 fcoe_netdev(lp)->name);
284
285 return 0;
286}
287
288/**
289 * fcoe_sw_em_config() - allocates em for this lport
290 * @lp: the port that em is to allocated for
291 *
292 * Returns : 0 on success
293 */
294static inline int fcoe_sw_em_config(struct fc_lport *lp)
295{
296 BUG_ON(lp->emp);
297
298 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
299 FCOE_MIN_XID, FCOE_MAX_XID);
300 if (!lp->emp)
301 return -ENOMEM;
302
303 return 0;
304}
305
306/**
307 * fcoe_sw_destroy() - FCoE software HBA tear-down function
308 * @netdev: ptr to the associated net_device
309 *
310 * Returns: 0 for success, -ENODEV if no lport is found for the netdev
311 */
312static int fcoe_sw_destroy(struct net_device *netdev)
313{
314 int cpu;
315 struct fc_lport *lp = NULL;
316 struct fcoe_softc *fc;
317 u8 flogi_maddr[ETH_ALEN];
318
319 BUG_ON(!netdev);
320
321 printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
322 netdev->name);
323
324 lp = fcoe_hostlist_lookup(netdev);
325 if (!lp)
326 return -ENODEV;
327
328 fc = lport_priv(lp);
329
330 /* Logout of the fabric */
331 fc_fabric_logoff(lp);
332
333 /* Remove the instance from fcoe's list */
334 fcoe_hostlist_remove(lp);
335
336 /* Don't listen for Ethernet packets anymore */
337 dev_remove_pack(&fc->fcoe_packet_type);
338
339 /* Cleanup the fc_lport */
340 fc_lport_destroy(lp);
341 fc_fcp_destroy(lp);
342
343 /* Detach from the scsi-ml */
344 fc_remove_host(lp->host);
345 scsi_remove_host(lp->host);
346
347 /* There are no more rports or I/O, free the EM */
348 if (lp->emp)
349 fc_exch_mgr_free(lp->emp);
350
351 /* Delete secondary MAC addresses */
352 rtnl_lock();
353 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
354 dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
355 if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
356 dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
357 rtnl_unlock();
358
359 /* Flush this lport's skbs from the per-CPU receive queues */
360 fcoe_percpu_clean(lp);
361
362 /* Free existing skbs */
363 fcoe_clean_pending_queue(lp);
364
365 /* Free memory used by statistical counters */
366 for_each_online_cpu(cpu)
367 kfree(lp->dev_stats[cpu]);
368
369 /* Release the net_device and Scsi_Host */
370 dev_put(fc->real_dev);
371 scsi_host_put(lp->host);
372
373 return 0;
374}
375
376/*
377 * fcoe_sw_ddp_setup - calls LLD's ddp_setup through net_device
378 * @lp: the corresponding fc_lport
379 * @xid: the exchange id for this ddp transfer
380 * @sgl: the scatterlist describing this transfer
381 * @sgc: number of sg items
382 *
383 * Returns : the LLD's ndo_fcoe_ddp_setup() result, or 0 if DDP is not supported
384 */
385static int fcoe_sw_ddp_setup(struct fc_lport *lp, u16 xid,
386 struct scatterlist *sgl, unsigned int sgc)
387{
388 struct net_device *n = fcoe_netdev(lp);
389
390 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
391 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
392
393 return 0;
394}
395
396/*
397 * fcoe_sw_ddp_done - calls LLD's ddp_done through net_device
398 * @lp: the corresponding fc_lport
399 * @xid: the exchange id for this ddp transfer
400 *
401 * Returns : the length of data that has been completed by DDP
402 */
403static int fcoe_sw_ddp_done(struct fc_lport *lp, u16 xid)
404{
405 struct net_device *n = fcoe_netdev(lp);
406
407 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
408 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
409 return 0;
410}
411
412static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
413 .frame_send = fcoe_xmit,
414 .ddp_setup = fcoe_sw_ddp_setup,
415 .ddp_done = fcoe_sw_ddp_done,
416};
417
418/**
419 * fcoe_sw_create() - this function creates the fcoe interface
420 * @netdev: pointer the associated netdevice
421 *
422 * Creates fc_lport struct and scsi_host for lport, configures lport
423 * and starts fabric login.
424 *
425 * Returns : 0 on success
426 */
427static int fcoe_sw_create(struct net_device *netdev)
428{
429 int rc;
430 struct fc_lport *lp = NULL;
431 struct fcoe_softc *fc;
432 struct Scsi_Host *shost;
433
434 BUG_ON(!netdev);
435
436 printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
437 netdev->name);
438
439 lp = fcoe_hostlist_lookup(netdev);
440 if (lp)
441 return -EEXIST;
442
443 shost = fcoe_host_alloc(&fcoe_sw_shost_template,
444 sizeof(struct fcoe_softc));
445 if (!shost) {
446 FC_DBG("Could not allocate host structure\n");
447 return -ENOMEM;
448 }
449 lp = shost_priv(shost);
450 fc = lport_priv(lp);
451
452 /* configure fc_lport, e.g., em */
453 rc = fcoe_sw_lport_config(lp);
454 if (rc) {
455 FC_DBG("Could not configure lport\n");
456 goto out_host_put;
457 }
458
459 /* configure lport network properties */
460 rc = fcoe_sw_netdev_config(lp, netdev);
461 if (rc) {
462 FC_DBG("Could not configure netdev for lport\n");
463 goto out_host_put;
464 }
465
466 /* configure lport scsi host properties */
467 rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
468 if (rc) {
469 FC_DBG("Could not configure shost for lport\n");
470 goto out_host_put;
471 }
472
473 /* lport exch manager allocation */
474 rc = fcoe_sw_em_config(lp);
475 if (rc) {
476 FC_DBG("Could not configure em for lport\n");
477 goto out_host_put;
478 }
479
480 /* Initialize the library */
481 rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
482 if (rc) {
483 FC_DBG("Could not configure libfc for lport!\n");
484 goto out_lp_destroy;
485 }
486
487 /* add to lports list */
488 fcoe_hostlist_add(lp);
489
490 lp->boot_time = jiffies;
491
492 fc_fabric_login(lp);
493
494 dev_hold(netdev);
495
496 return rc;
497
498out_lp_destroy:
499 fc_exch_mgr_free(lp->emp); /* Free the EM */
500out_host_put:
501 scsi_host_put(lp->host);
502 return rc;
503}
504
505/**
506 * fcoe_sw_match() - The FCoE SW transport match function
507 *
508 * Returns : false always
509 */
510static bool fcoe_sw_match(struct net_device *netdev)
511{
512 /* FIXME - for sw transport, always return false */
513 return false;
514}
515
516/* the sw hba fcoe transport */
517struct fcoe_transport fcoe_sw_transport = {
518 .name = "fcoesw",
519 .create = fcoe_sw_create,
520 .destroy = fcoe_sw_destroy,
521 .match = fcoe_sw_match,
522 .vendor = 0x0,
523 .device = 0xffff,
524};
525
526/**
527 * fcoe_sw_init() - Registers fcoe_sw_transport
528 *
529 * Returns : 0 on success
530 */
531int __init fcoe_sw_init(void)
532{
533 /* attach to scsi transport */
534 scsi_transport_fcoe_sw =
535 fc_attach_transport(&fcoe_sw_transport_function);
536
537 if (!scsi_transport_fcoe_sw) {
538 printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
539 return -ENODEV;
540 }
541
542 mutex_init(&fcoe_sw_transport.devlock);
543 INIT_LIST_HEAD(&fcoe_sw_transport.devlist);
544
545 /* register sw transport */
546 fcoe_transport_register(&fcoe_sw_transport);
547 return 0;
548}
549
550/**
551 * fcoe_sw_exit() - Unregisters fcoe_sw_transport
552 *
553 * Returns : 0 on success
554 */
555int __exit fcoe_sw_exit(void)
556{
557 /* detach the transport */
558 fc_release_transport(scsi_transport_fcoe_sw);
559 fcoe_transport_unregister(&fcoe_sw_transport);
560 return 0;
561}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 0d6f5beb7f9e..f410f4abb548 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 2 * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2009 Intel Corporation. All rights reserved.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -17,1200 +18,1260 @@
17 * Maintained at www.Open-FCoE.org 18 * Maintained at www.Open-FCoE.org
18 */ 19 */
19 20
21#include <linux/types.h>
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/list.h>
23#include <linux/spinlock.h> 25#include <linux/spinlock.h>
24#include <linux/skbuff.h> 26#include <linux/timer.h>
25#include <linux/netdevice.h> 27#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
27#include <linux/ethtool.h> 29#include <linux/ethtool.h>
28#include <linux/if_ether.h> 30#include <linux/if_ether.h>
29#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
30#include <linux/kthread.h> 32#include <linux/netdevice.h>
31#include <linux/crc32.h> 33#include <linux/errno.h>
32#include <linux/cpu.h> 34#include <linux/bitops.h>
33#include <linux/fs.h>
34#include <linux/sysfs.h>
35#include <linux/ctype.h>
36#include <scsi/scsi_tcq.h>
37#include <scsi/scsicam.h>
38#include <scsi/scsi_transport.h>
39#include <scsi/scsi_transport_fc.h>
40#include <net/rtnetlink.h> 35#include <net/rtnetlink.h>
41 36
37#include <scsi/fc/fc_els.h>
38#include <scsi/fc/fc_fs.h>
39#include <scsi/fc/fc_fip.h>
42#include <scsi/fc/fc_encaps.h> 40#include <scsi/fc/fc_encaps.h>
41#include <scsi/fc/fc_fcoe.h>
43 42
44#include <scsi/libfc.h> 43#include <scsi/libfc.h>
45#include <scsi/fc_frame.h>
46#include <scsi/libfcoe.h> 44#include <scsi/libfcoe.h>
47#include <scsi/fc_transport_fcoe.h>
48
49static int debug_fcoe;
50 45
51#define FCOE_MAX_QUEUE_DEPTH 256 46MODULE_AUTHOR("Open-FCoE.org");
52#define FCOE_LOW_QUEUE_DEPTH 32 47MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
48MODULE_LICENSE("GPL v2");
53 49
54/* destination address mode */ 50#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
55#define FCOE_GW_ADDR_MODE 0x00 51#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
56#define FCOE_FCOUI_ADDR_MODE 0x01
57 52
58#define FCOE_WORD_TO_BYTE 4 53static void fcoe_ctlr_timeout(unsigned long);
54static void fcoe_ctlr_link_work(struct work_struct *);
55static void fcoe_ctlr_recv_work(struct work_struct *);
59 56
60MODULE_AUTHOR("Open-FCoE.org"); 57static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
61MODULE_DESCRIPTION("FCoE");
62MODULE_LICENSE("GPL");
63 58
64/* fcoe host list */ 59static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */
65LIST_HEAD(fcoe_hostlist);
66DEFINE_RWLOCK(fcoe_hostlist_lock);
67DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
68struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
69 60
61#define FIP_DBG_LVL(level, fmt, args...) \
62 do { \
63 if (fcoe_ctlr_debug >= (level)) \
64 FC_DBG(fmt, ##args); \
65 } while (0)
70 66
71/* Function Prototyes */ 67#define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args)
72static int fcoe_check_wait_queue(struct fc_lport *);
73static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
74#ifdef CONFIG_HOTPLUG_CPU
75static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
76#endif /* CONFIG_HOTPLUG_CPU */
77static int fcoe_device_notification(struct notifier_block *, ulong, void *);
78static void fcoe_dev_setup(void);
79static void fcoe_dev_cleanup(void);
80 68
81/* notification function from net device */ 69/*
82static struct notifier_block fcoe_notifier = { 70 * Return non-zero if FCF fcoe_size has been validated.
83 .notifier_call = fcoe_device_notification, 71 */
84}; 72static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
73{
74 return (fcf->flags & FIP_FL_SOL) != 0;
75}
85 76
77/*
78 * Return non-zero if the FCF is usable.
79 */
80static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
81{
82 u16 flags = FIP_FL_SOL | FIP_FL_AVAIL;
86 83
87#ifdef CONFIG_HOTPLUG_CPU 84 return (fcf->flags & flags) == flags;
88static struct notifier_block fcoe_cpu_notifier = { 85}
89 .notifier_call = fcoe_cpu_callback,
90};
91 86
92/** 87/**
93 * fcoe_create_percpu_data() - creates the associated cpu data 88 * fcoe_ctlr_init() - Initialize the FCoE Controller instance.
94 * @cpu: index for the cpu where fcoe cpu data will be created 89 * @fip: FCoE controller.
95 *
96 * create percpu stats block, from cpu add notifier
97 *
98 * Returns: none
99 */ 90 */
100static void fcoe_create_percpu_data(int cpu) 91void fcoe_ctlr_init(struct fcoe_ctlr *fip)
101{ 92{
102 struct fc_lport *lp; 93 fip->state = FIP_ST_LINK_WAIT;
103 struct fcoe_softc *fc; 94 INIT_LIST_HEAD(&fip->fcfs);
104 95 spin_lock_init(&fip->lock);
105 write_lock_bh(&fcoe_hostlist_lock); 96 fip->flogi_oxid = FC_XID_UNKNOWN;
106 list_for_each_entry(fc, &fcoe_hostlist, list) { 97 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
107 lp = fc->lp; 98 INIT_WORK(&fip->link_work, fcoe_ctlr_link_work);
108 if (lp->dev_stats[cpu] == NULL) 99 INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
109 lp->dev_stats[cpu] = 100 skb_queue_head_init(&fip->fip_recv_list);
110 kzalloc(sizeof(struct fcoe_dev_stats),
111 GFP_KERNEL);
112 }
113 write_unlock_bh(&fcoe_hostlist_lock);
114} 101}
102EXPORT_SYMBOL(fcoe_ctlr_init);
115 103
116/** 104/**
117 * fcoe_destroy_percpu_data() - destroys the associated cpu data 105 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller.
118 * @cpu: index for the cpu where fcoe cpu data will destroyed 106 * @fip: FCoE controller.
119 *
120 * destroy percpu stats block called by cpu add/remove notifier
121 * 107 *
122 * Retuns: none 108 * Called with &fcoe_ctlr lock held.
123 */ 109 */
124static void fcoe_destroy_percpu_data(int cpu) 110static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
125{ 111{
126 struct fc_lport *lp; 112 struct fcoe_fcf *fcf;
127 struct fcoe_softc *fc; 113 struct fcoe_fcf *next;
128 114
129 write_lock_bh(&fcoe_hostlist_lock); 115 fip->sel_fcf = NULL;
130 list_for_each_entry(fc, &fcoe_hostlist, list) { 116 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
131 lp = fc->lp; 117 list_del(&fcf->list);
132 kfree(lp->dev_stats[cpu]); 118 kfree(fcf);
133 lp->dev_stats[cpu] = NULL;
134 } 119 }
135 write_unlock_bh(&fcoe_hostlist_lock); 120 fip->fcf_count = 0;
121 fip->sel_time = 0;
136} 122}
137 123
138/** 124/**
139 * fcoe_cpu_callback() - fcoe cpu hotplug event callback 125 * fcoe_ctrl_destroy() - Disable and tear-down the FCoE controller.
140 * @nfb: callback data block 126 * @fip: FCoE controller.
141 * @action: event triggering the callback 127 *
142 * @hcpu: index for the cpu of this event 128 * This is called by FCoE drivers before freeing the &fcoe_ctlr.
143 * 129 *
144 * this creates or destroys per cpu data for fcoe 130 * The receive handler will have been deleted before this to guarantee
131 * that no more recv_work will be scheduled.
145 * 132 *
146 * Returns NOTIFY_OK always. 133 * The timer routine will simply return once we set FIP_ST_DISABLED.
134 * This guarantees that no further timeouts or work will be scheduled.
147 */ 135 */
148static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, 136void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
149 void *hcpu)
150{ 137{
151 unsigned int cpu = (unsigned long)hcpu; 138 flush_work(&fip->recv_work);
152 139 spin_lock_bh(&fip->lock);
153 switch (action) { 140 fip->state = FIP_ST_DISABLED;
154 case CPU_ONLINE: 141 fcoe_ctlr_reset_fcfs(fip);
155 fcoe_create_percpu_data(cpu); 142 spin_unlock_bh(&fip->lock);
156 break; 143 del_timer_sync(&fip->timer);
157 case CPU_DEAD: 144 flush_work(&fip->link_work);
158 fcoe_destroy_percpu_data(cpu);
159 break;
160 default:
161 break;
162 }
163 return NOTIFY_OK;
164} 145}
165#endif /* CONFIG_HOTPLUG_CPU */ 146EXPORT_SYMBOL(fcoe_ctlr_destroy);
166 147
167/** 148/**
168 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ 149 * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port.
169 * @skb: the receive skb 150 * @fip: FCoE controller.
170 * @dev: associated net device
171 * @ptype: context
172 * @odldev: last device
173 *
174 * this function will receive the packet and build fc frame and pass it up
175 * 151 *
176 * Returns: 0 for success 152 * Returns the maximum packet size including the FCoE header and trailer,
153 * but not including any Ethernet or VLAN headers.
177 */ 154 */
178int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, 155static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip)
179 struct packet_type *ptype, struct net_device *olddev)
180{ 156{
181 struct fc_lport *lp; 157 /*
182 struct fcoe_rcv_info *fr; 158 * Determine the max FCoE frame size allowed, including
183 struct fcoe_softc *fc; 159 * FCoE header and trailer.
184 struct fcoe_dev_stats *stats; 160 * Note: lp->mfs is currently the payload size, not the frame size.
185 struct fc_frame_header *fh; 161 */
186 unsigned short oxid; 162 return fip->lp->mfs + sizeof(struct fc_frame_header) +
187 int cpu_idx; 163 sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof);
188 struct fcoe_percpu_s *fps; 164}
189
190 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
191 lp = fc->lp;
192 if (unlikely(lp == NULL)) {
193 FC_DBG("cannot find hba structure");
194 goto err2;
195 }
196
197 if (unlikely(debug_fcoe)) {
198 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
199 "end:%p sum:%d dev:%s", skb->len, skb->data_len,
200 skb->head, skb->data, skb_tail_pointer(skb),
201 skb_end_pointer(skb), skb->csum,
202 skb->dev ? skb->dev->name : "<NULL>");
203 165
204 } 166/**
167 * fcoe_ctlr_solicit() - Send a solicitation.
168 * @fip: FCoE controller.
169 * @fcf: Destination FCF. If NULL, a multicast solicitation is sent.
170 */
171static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
172{
173 struct sk_buff *skb;
174 struct fip_sol {
175 struct ethhdr eth;
176 struct fip_header fip;
177 struct {
178 struct fip_mac_desc mac;
179 struct fip_wwn_desc wwnn;
180 struct fip_size_desc size;
181 } __attribute__((packed)) desc;
182 } __attribute__((packed)) *sol;
183 u32 fcoe_size;
184
185 skb = dev_alloc_skb(sizeof(*sol));
186 if (!skb)
187 return;
205 188
206 /* check for FCOE packet type */ 189 sol = (struct fip_sol *)skb->data;
207 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
208 FC_DBG("wrong FC type frame");
209 goto err;
210 }
211 190
212 /* 191 memset(sol, 0, sizeof(*sol));
213 * Check for minimum frame length, and make sure required FCoE 192 memcpy(sol->eth.h_dest, fcf ? fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN);
214 * and FC headers are pulled into the linear data area. 193 memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
215 */ 194 sol->eth.h_proto = htons(ETH_P_FIP);
216 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
217 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
218 goto err;
219 195
220 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 196 sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
221 fh = (struct fc_frame_header *) skb_transport_header(skb); 197 sol->fip.fip_op = htons(FIP_OP_DISC);
198 sol->fip.fip_subcode = FIP_SC_SOL;
199 sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW);
200 sol->fip.fip_flags = htons(FIP_FL_FPMA);
222 201
223 oxid = ntohs(fh->fh_ox_id); 202 sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
203 sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW;
204 memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
224 205
225 fr = fcoe_dev_from_skb(skb); 206 sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
226 fr->fr_dev = lp; 207 sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW;
227 fr->ptype = ptype; 208 put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn);
228 cpu_idx = 0;
229#ifdef CONFIG_SMP
230 /*
231 * The incoming frame exchange id(oxid) is ANDed with num of online
232 * cpu bits to get cpu_idx and then this cpu_idx is used for selecting
233 * a per cpu kernel thread from fcoe_percpu. In case the cpu is
234 * offline or no kernel thread for derived cpu_idx then cpu_idx is
235 * initialize to first online cpu index.
236 */
237 cpu_idx = oxid & (num_online_cpus() - 1);
238 if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
239 cpu_idx = first_cpu(cpu_online_map);
240#endif
241 fps = fcoe_percpu[cpu_idx];
242 209
243 spin_lock_bh(&fps->fcoe_rx_list.lock); 210 fcoe_size = fcoe_ctlr_fcoe_size(fip);
244 __skb_queue_tail(&fps->fcoe_rx_list, skb); 211 sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE;
245 if (fps->fcoe_rx_list.qlen == 1) 212 sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW;
246 wake_up_process(fps->thread); 213 sol->desc.size.fd_size = htons(fcoe_size);
247 214
248 spin_unlock_bh(&fps->fcoe_rx_list.lock); 215 skb_put(skb, sizeof(*sol));
216 skb->protocol = htons(ETH_P_802_3);
217 skb_reset_mac_header(skb);
218 skb_reset_network_header(skb);
219 fip->send(fip, skb);
249 220
250 return 0; 221 if (!fcf)
251err: 222 fip->sol_time = jiffies;
252#ifdef CONFIG_SMP
253 stats = lp->dev_stats[smp_processor_id()];
254#else
255 stats = lp->dev_stats[0];
256#endif
257 if (stats)
258 stats->ErrorFrames++;
259
260err2:
261 kfree_skb(skb);
262 return -1;
263} 223}
264EXPORT_SYMBOL_GPL(fcoe_rcv);
265 224
266/** 225/**
267 * fcoe_start_io() - pass to netdev to start xmit for fcoe 226 * fcoe_ctlr_link_up() - Start FCoE controller.
268 * @skb: the skb to be xmitted 227 * @fip: FCoE controller.
269 * 228 *
270 * Returns: 0 for success 229 * Called from the LLD when the network link is ready.
271 */ 230 */
272static inline int fcoe_start_io(struct sk_buff *skb) 231void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
273{ 232{
274 int rc; 233 spin_lock_bh(&fip->lock);
275 234 if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
276 skb_get(skb); 235 fip->last_link = 1;
277 rc = dev_queue_xmit(skb); 236 fip->link = 1;
278 if (rc != 0) 237 spin_unlock_bh(&fip->lock);
279 return rc; 238 fc_linkup(fip->lp);
280 kfree_skb(skb); 239 } else if (fip->state == FIP_ST_LINK_WAIT) {
281 return 0; 240 fip->state = FIP_ST_AUTO;
241 fip->last_link = 1;
242 fip->link = 1;
243 spin_unlock_bh(&fip->lock);
244 FIP_DBG("%s", "setting AUTO mode.\n");
245 fc_linkup(fip->lp);
246 fcoe_ctlr_solicit(fip, NULL);
247 } else
248 spin_unlock_bh(&fip->lock);
282} 249}
250EXPORT_SYMBOL(fcoe_ctlr_link_up);
283 251
284/** 252/**
285 * fcoe_get_paged_crc_eof() - in case we need alloc a page for crc_eof 253 * fcoe_ctlr_reset() - Reset FIP.
286 * @skb: the skb to be xmitted 254 * @fip: FCoE controller.
287 * @tlen: total len 255 * @new_state: FIP state to be entered.
288 * 256 *
289 * Returns: 0 for success 257 * Returns non-zero if the link was up and now isn't.
290 */ 258 */
291static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) 259static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state)
292{ 260{
293 struct fcoe_percpu_s *fps; 261 struct fc_lport *lp = fip->lp;
294 struct page *page; 262 int link_dropped;
295 int cpu_idx; 263
296 264 spin_lock_bh(&fip->lock);
297 cpu_idx = get_cpu(); 265 fcoe_ctlr_reset_fcfs(fip);
298 fps = fcoe_percpu[cpu_idx]; 266 del_timer(&fip->timer);
299 page = fps->crc_eof_page; 267 fip->state = new_state;
300 if (!page) { 268 fip->ctlr_ka_time = 0;
301 page = alloc_page(GFP_ATOMIC); 269 fip->port_ka_time = 0;
302 if (!page) { 270 fip->sol_time = 0;
303 put_cpu(); 271 fip->flogi_oxid = FC_XID_UNKNOWN;
304 return -ENOMEM; 272 fip->map_dest = 0;
305 } 273 fip->last_link = 0;
306 fps->crc_eof_page = page; 274 link_dropped = fip->link;
307 WARN_ON(fps->crc_eof_offset != 0); 275 fip->link = 0;
276 spin_unlock_bh(&fip->lock);
277
278 if (link_dropped)
279 fc_linkdown(lp);
280
281 if (new_state == FIP_ST_ENABLED) {
282 fcoe_ctlr_solicit(fip, NULL);
283 fc_linkup(lp);
284 link_dropped = 0;
308 } 285 }
309 286 return link_dropped;
310 get_page(page);
311 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
312 fps->crc_eof_offset, tlen);
313 skb->len += tlen;
314 skb->data_len += tlen;
315 skb->truesize += tlen;
316 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
317
318 if (fps->crc_eof_offset >= PAGE_SIZE) {
319 fps->crc_eof_page = NULL;
320 fps->crc_eof_offset = 0;
321 put_page(page);
322 }
323 put_cpu();
324 return 0;
325} 287}
326 288
327/** 289/**
328 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb 290 * fcoe_ctlr_link_down() - Stop FCoE controller.
329 * @fp: the fc_frame containg data to be checksummed 291 * @fip: FCoE controller.
330 * 292 *
331 * This uses crc32() to calculate the crc for fc frame 293 * Returns non-zero if the link was up and now isn't.
332 * Return : 32 bit crc 294 *
295 * Called from the LLD when the network link is not ready.
296 * There may be multiple calls while the link is down.
333 */ 297 */
334u32 fcoe_fc_crc(struct fc_frame *fp) 298int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
335{ 299{
336 struct sk_buff *skb = fp_skb(fp); 300 return fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT);
337 struct skb_frag_struct *frag;
338 unsigned char *data;
339 unsigned long off, len, clen;
340 u32 crc;
341 unsigned i;
342
343 crc = crc32(~0, skb->data, skb_headlen(skb));
344
345 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
346 frag = &skb_shinfo(skb)->frags[i];
347 off = frag->page_offset;
348 len = frag->size;
349 while (len > 0) {
350 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
351 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
352 KM_SKB_DATA_SOFTIRQ);
353 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
354 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
355 off += clen;
356 len -= clen;
357 }
358 }
359 return crc;
360} 301}
361EXPORT_SYMBOL_GPL(fcoe_fc_crc); 302EXPORT_SYMBOL(fcoe_ctlr_link_down);
362 303
363/** 304/**
364 * fcoe_xmit() - FCoE frame transmit function 305 * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF.
365 * @lp: the associated local port 306 * @fip: FCoE controller.
366 * @fp: the fc_frame to be transmitted 307 * @ports: 0 for controller keep-alive, 1 for port keep-alive.
308 * @sa: source MAC address.
367 * 309 *
368 * Return : 0 for success 310 * A controller keep-alive is sent every fka_period (typically 8 seconds).
311 * The source MAC is the native MAC address.
312 *
313 * A port keep-alive is sent every 90 seconds while logged in.
314 * The source MAC is the assigned mapped source address.
315 * The destination is the FCF's F-port.
369 */ 316 */
370int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 317static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
371{ 318{
372 int wlen, rc = 0;
373 u32 crc;
374 struct ethhdr *eh;
375 struct fcoe_crc_eof *cp;
376 struct sk_buff *skb; 319 struct sk_buff *skb;
377 struct fcoe_dev_stats *stats; 320 struct fip_kal {
378 struct fc_frame_header *fh; 321 struct ethhdr eth;
379 unsigned int hlen; /* header length implies the version */ 322 struct fip_header fip;
380 unsigned int tlen; /* trailer length */ 323 struct fip_mac_desc mac;
381 unsigned int elen; /* eth header, may include vlan */ 324 } __attribute__((packed)) *kal;
382 int flogi_in_progress = 0; 325 struct fip_vn_desc *vn;
383 struct fcoe_softc *fc; 326 u32 len;
384 u8 sof, eof; 327 struct fc_lport *lp;
385 struct fcoe_hdr *hp; 328 struct fcoe_fcf *fcf;
386
387 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
388
389 fc = lport_priv(lp);
390 /*
391 * if it is a flogi then we need to learn gw-addr
392 * and my own fcid
393 */
394 fh = fc_frame_header_get(fp);
395 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
396 if (fc_frame_payload_op(fp) == ELS_FLOGI) {
397 fc->flogi_oxid = ntohs(fh->fh_ox_id);
398 fc->address_mode = FCOE_FCOUI_ADDR_MODE;
399 fc->flogi_progress = 1;
400 flogi_in_progress = 1;
401 } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
402 /*
403 * Here we must've gotten an SID by accepting an FLOGI
404 * from a point-to-point connection. Switch to using
405 * the source mac based on the SID. The destination
406 * MAC in this case would have been set by receving the
407 * FLOGI.
408 */
409 fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
410 fc->flogi_progress = 0;
411 }
412 }
413
414 skb = fp_skb(fp);
415 sof = fr_sof(fp);
416 eof = fr_eof(fp);
417
418 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
419 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
420 hlen = sizeof(struct fcoe_hdr);
421 tlen = sizeof(struct fcoe_crc_eof);
422 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
423
424 /* crc offload */
425 if (likely(lp->crc_offload)) {
426 skb->ip_summed = CHECKSUM_PARTIAL;
427 skb->csum_start = skb_headroom(skb);
428 skb->csum_offset = skb->len;
429 crc = 0;
430 } else {
431 skb->ip_summed = CHECKSUM_NONE;
432 crc = fcoe_fc_crc(fp);
433 }
434 329
435 /* copy fc crc and eof to the skb buff */ 330 fcf = fip->sel_fcf;
436 if (skb_is_nonlinear(skb)) { 331 lp = fip->lp;
437 skb_frag_t *frag; 332 if (!fcf || !fc_host_port_id(lp->host))
438 if (fcoe_get_paged_crc_eof(skb, tlen)) { 333 return;
439 kfree_skb(skb);
440 return -ENOMEM;
441 }
442 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
443 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
444 + frag->page_offset;
445 } else {
446 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
447 }
448 334
449 memset(cp, 0, sizeof(*cp)); 335 len = fcoe_ctlr_fcoe_size(fip) + sizeof(struct ethhdr);
450 cp->fcoe_eof = eof; 336 BUG_ON(len < sizeof(*kal) + sizeof(*vn));
451 cp->fcoe_crc32 = cpu_to_le32(~crc); 337 skb = dev_alloc_skb(len);
338 if (!skb)
339 return;
452 340
453 if (skb_is_nonlinear(skb)) { 341 kal = (struct fip_kal *)skb->data;
454 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); 342 memset(kal, 0, len);
455 cp = NULL; 343 memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
344 memcpy(kal->eth.h_source, sa, ETH_ALEN);
345 kal->eth.h_proto = htons(ETH_P_FIP);
346
347 kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
348 kal->fip.fip_op = htons(FIP_OP_CTRL);
349 kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE;
350 kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
351 ports * sizeof(*vn)) / FIP_BPW);
352 kal->fip.fip_flags = htons(FIP_FL_FPMA);
353
354 kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
355 kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
356 memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
357
358 if (ports) {
359 vn = (struct fip_vn_desc *)(kal + 1);
360 vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
361 vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
362 memcpy(vn->fd_mac, fip->data_src_addr, ETH_ALEN);
363 hton24(vn->fd_fc_id, fc_host_port_id(lp->host));
364 put_unaligned_be64(lp->wwpn, &vn->fd_wwpn);
456 } 365 }
457 366
 458 /* adjust skb network/transport offsets to match mac/fcoe/fc */ 367 skb_put(skb, len);
459 skb_push(skb, elen + hlen); 368 skb->protocol = htons(ETH_P_802_3);
460 skb_reset_mac_header(skb); 369 skb_reset_mac_header(skb);
461 skb_reset_network_header(skb); 370 skb_reset_network_header(skb);
462 skb->mac_len = elen; 371 fip->send(fip, skb);
463 skb->protocol = htons(ETH_P_FCOE);
464 skb->dev = fc->real_dev;
465
466 /* fill up mac and fcoe headers */
467 eh = eth_hdr(skb);
468 eh->h_proto = htons(ETH_P_FCOE);
469 if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
470 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
471 else
472 /* insert GW address */
473 memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
474
475 if (unlikely(flogi_in_progress))
476 memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
477 else
478 memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
479
480 hp = (struct fcoe_hdr *)(eh + 1);
481 memset(hp, 0, sizeof(*hp));
482 if (FC_FCOE_VER)
483 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
484 hp->fcoe_sof = sof;
485
486#ifdef NETIF_F_FSO
487 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
488 if (lp->seq_offload && fr_max_payload(fp)) {
489 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
490 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
491 } else {
492 skb_shinfo(skb)->gso_type = 0;
493 skb_shinfo(skb)->gso_size = 0;
494 }
495#endif
496 /* update tx stats: regardless if LLD fails */
497 stats = lp->dev_stats[smp_processor_id()];
498 if (stats) {
499 stats->TxFrames++;
500 stats->TxWords += wlen;
501 }
502
503 /* send down to lld */
504 fr_dev(fp) = lp;
505 if (fc->fcoe_pending_queue.qlen)
506 rc = fcoe_check_wait_queue(lp);
507
508 if (rc == 0)
509 rc = fcoe_start_io(skb);
510
511 if (rc) {
512 spin_lock_bh(&fc->fcoe_pending_queue.lock);
513 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
514 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
515 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
516 lp->qfull = 1;
517 }
518
519 return 0;
520} 372}
521EXPORT_SYMBOL_GPL(fcoe_xmit);
522 373
523/** 374/**
524 * fcoe_percpu_receive_thread() - recv thread per cpu 375 * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it.
525 * @arg: ptr to the fcoe per cpu struct 376 * @fip: FCoE controller.
377 * @dtype: FIP descriptor type for the frame.
378 * @skb: FCoE ELS frame including FC header but no FCoE headers.
379 *
380 * Returns non-zero error code on failure.
381 *
382 * The caller must check that the length is a multiple of 4.
526 * 383 *
527 * Return: 0 for success 384 * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes).
385 * Headroom includes the FIP encapsulation description, FIP header, and
386 * Ethernet header. The tailroom is for the FIP MAC descriptor.
528 */ 387 */
529int fcoe_percpu_receive_thread(void *arg) 388static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
389 u8 dtype, struct sk_buff *skb)
530{ 390{
531 struct fcoe_percpu_s *p = arg; 391 struct fip_encaps_head {
532 u32 fr_len; 392 struct ethhdr eth;
533 struct fc_lport *lp; 393 struct fip_header fip;
534 struct fcoe_rcv_info *fr; 394 struct fip_encaps encaps;
535 struct fcoe_dev_stats *stats; 395 } __attribute__((packed)) *cap;
536 struct fc_frame_header *fh; 396 struct fip_mac_desc *mac;
537 struct sk_buff *skb; 397 struct fcoe_fcf *fcf;
538 struct fcoe_crc_eof crc_eof; 398 size_t dlen;
539 struct fc_frame *fp; 399
540 u8 *mac = NULL; 400 fcf = fip->sel_fcf;
541 struct fcoe_softc *fc; 401 if (!fcf)
542 struct fcoe_hdr *hp; 402 return -ENODEV;
543 403 dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
544 set_user_nice(current, -20); 404 cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
545 405
546 while (!kthread_should_stop()) { 406 memset(cap, 0, sizeof(*cap));
547 407 memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
548 spin_lock_bh(&p->fcoe_rx_list.lock); 408 memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
549 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { 409 cap->eth.h_proto = htons(ETH_P_FIP);
550 set_current_state(TASK_INTERRUPTIBLE); 410
551 spin_unlock_bh(&p->fcoe_rx_list.lock); 411 cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
552 schedule(); 412 cap->fip.fip_op = htons(FIP_OP_LS);
553 set_current_state(TASK_RUNNING); 413 cap->fip.fip_subcode = FIP_SC_REQ;
554 if (kthread_should_stop()) 414 cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
555 return 0; 415 cap->fip.fip_flags = htons(FIP_FL_FPMA);
556 spin_lock_bh(&p->fcoe_rx_list.lock); 416
557 } 417 cap->encaps.fd_desc.fip_dtype = dtype;
558 spin_unlock_bh(&p->fcoe_rx_list.lock); 418 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
559 fr = fcoe_dev_from_skb(skb); 419
560 lp = fr->fr_dev; 420 mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
 561 if (unlikely(lp == NULL)) { 421 memset(mac, 0, sizeof(*mac));
562 FC_DBG("invalid HBA Structure"); 422 mac->fd_desc.fip_dtype = FIP_DT_MAC;
563 kfree_skb(skb); 423 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
564 continue; 424 if (dtype != ELS_FLOGI)
565 } 425 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
566 426
567 stats = lp->dev_stats[smp_processor_id()]; 427 skb->protocol = htons(ETH_P_802_3);
568 428 skb_reset_mac_header(skb);
569 if (unlikely(debug_fcoe)) { 429 skb_reset_network_header(skb);
570 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
571 "tail:%p end:%p sum:%d dev:%s",
572 skb->len, skb->data_len,
573 skb->head, skb->data, skb_tail_pointer(skb),
574 skb_end_pointer(skb), skb->csum,
575 skb->dev ? skb->dev->name : "<NULL>");
576 }
577
578 /*
579 * Save source MAC address before discarding header.
580 */
581 fc = lport_priv(lp);
582 if (unlikely(fc->flogi_progress))
583 mac = eth_hdr(skb)->h_source;
584
585 if (skb_is_nonlinear(skb))
586 skb_linearize(skb); /* not ideal */
587
588 /*
589 * Frame length checks and setting up the header pointers
590 * was done in fcoe_rcv already.
591 */
592 hp = (struct fcoe_hdr *) skb_network_header(skb);
593 fh = (struct fc_frame_header *) skb_transport_header(skb);
594
595 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
596 if (stats) {
597 if (stats->ErrorFrames < 5)
598 FC_DBG("unknown FCoE version %x",
599 FC_FCOE_DECAPS_VER(hp));
600 stats->ErrorFrames++;
601 }
602 kfree_skb(skb);
603 continue;
604 }
605
606 skb_pull(skb, sizeof(struct fcoe_hdr));
607 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
608
609 if (stats) {
610 stats->RxFrames++;
611 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
612 }
613
614 fp = (struct fc_frame *)skb;
615 fc_frame_init(fp);
616 fr_dev(fp) = lp;
617 fr_sof(fp) = hp->fcoe_sof;
618
619 /* Copy out the CRC and EOF trailer for access */
620 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
621 kfree_skb(skb);
622 continue;
623 }
624 fr_eof(fp) = crc_eof.fcoe_eof;
625 fr_crc(fp) = crc_eof.fcoe_crc32;
626 if (pskb_trim(skb, fr_len)) {
627 kfree_skb(skb);
628 continue;
629 }
630
631 /*
 632 * We only check the CRC here if no offload is available and the
 633 * data is not solicited, since for solicited data the FCP layer
 634 * checks it during the copy.
635 */
636 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
637 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
638 else
639 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
640
641 fh = fc_frame_header_get(fp);
642 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
643 fh->fh_type == FC_TYPE_FCP) {
644 fc_exch_recv(lp, lp->emp, fp);
645 continue;
646 }
647 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
648 if (le32_to_cpu(fr_crc(fp)) !=
649 ~crc32(~0, skb->data, fr_len)) {
650 if (debug_fcoe || stats->InvalidCRCCount < 5)
651 printk(KERN_WARNING "fcoe: dropping "
652 "frame with CRC error\n");
653 stats->InvalidCRCCount++;
654 stats->ErrorFrames++;
655 fc_frame_free(fp);
656 continue;
657 }
658 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
659 }
660 /* non flogi and non data exchanges are handled here */
661 if (unlikely(fc->flogi_progress))
662 fcoe_recv_flogi(fc, fp, mac);
663 fc_exch_recv(lp, lp->emp, fp);
664 }
665 return 0; 430 return 0;
666} 431}
667 432
668/** 433/**
669 * fcoe_recv_flogi() - flogi receive function 434 * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate.
670 * @fc: associated fcoe_softc 435 * @fip: FCoE controller.
 671 * @fp: the received frame 436 * @skb: FCoE ELS frame including FC header but no FCoE headers.
672 * @sa: the source address of this flogi
673 * 437 *
 674 * This is responsible for parsing the FLOGI response and setting the corresponding 438 * Returns a non-zero error code if the frame should not be sent.
 675 * MAC address for the initiator, either OUI based or GW based. 439 * Returns zero if the caller should send the frame with FCoE encapsulation.
676 * 440 *
677 * Returns: none 441 * The caller must check that the length is a multiple of 4.
442 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
678 */ 443 */
679static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) 444int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
680{ 445{
681 struct fc_frame_header *fh; 446 struct fc_frame_header *fh;
447 u16 old_xid;
682 u8 op; 448 u8 op;
683 449
684 fh = fc_frame_header_get(fp); 450 if (fip->state == FIP_ST_NON_FIP)
685 if (fh->fh_type != FC_TYPE_ELS) 451 return 0;
686 return;
687 op = fc_frame_payload_op(fp);
688 if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
689 fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
690 /*
691 * FLOGI accepted.
692 * If the src mac addr is FC_OUI-based, then we mark the
693 * address_mode flag to use FC_OUI-based Ethernet DA.
694 * Otherwise we use the FCoE gateway addr
695 */
696 if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
697 fc->address_mode = FCOE_FCOUI_ADDR_MODE;
698 } else {
699 memcpy(fc->dest_addr, sa, ETH_ALEN);
700 fc->address_mode = FCOE_GW_ADDR_MODE;
701 }
702 452
453 fh = (struct fc_frame_header *)skb->data;
454 op = *(u8 *)(fh + 1);
455
456 switch (op) {
457 case ELS_FLOGI:
458 old_xid = fip->flogi_oxid;
459 fip->flogi_oxid = ntohs(fh->fh_ox_id);
460 if (fip->state == FIP_ST_AUTO) {
461 if (old_xid == FC_XID_UNKNOWN)
462 fip->flogi_count = 0;
463 fip->flogi_count++;
464 if (fip->flogi_count < 3)
465 goto drop;
466 fip->map_dest = 1;
467 return 0;
468 }
469 op = FIP_DT_FLOGI;
470 break;
471 case ELS_FDISC:
472 if (ntoh24(fh->fh_s_id))
473 return 0;
474 op = FIP_DT_FDISC;
475 break;
476 case ELS_LOGO:
477 if (fip->state != FIP_ST_ENABLED)
478 return 0;
479 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
480 return 0;
481 op = FIP_DT_LOGO;
482 break;
483 case ELS_LS_ACC:
484 if (fip->flogi_oxid == FC_XID_UNKNOWN)
485 return 0;
486 if (!ntoh24(fh->fh_s_id))
487 return 0;
488 if (fip->state == FIP_ST_AUTO)
489 return 0;
703 /* 490 /*
704 * Remove any previously-set unicast MAC filter. 491 * Here we must've gotten an SID by accepting an FLOGI
705 * Add secondary FCoE MAC address filter for our OUI. 492 * from a point-to-point connection. Switch to using
706 */ 493 * the source mac based on the SID. The destination
 707 rtnl_lock(); 494 * MAC in this case would have been set by receiving the
708 if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) 495 * FLOGI.
709 dev_unicast_delete(fc->real_dev, fc->data_src_addr,
710 ETH_ALEN);
711 fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
712 dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
713 rtnl_unlock();
714
715 fc->flogi_progress = 0;
716 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
717 /*
718 * Save source MAC for point-to-point responses.
719 */ 496 */
720 memcpy(fc->dest_addr, sa, ETH_ALEN); 497 fip->flogi_oxid = FC_XID_UNKNOWN;
721 fc->address_mode = FCOE_GW_ADDR_MODE; 498 fc_fcoe_set_mac(fip->data_src_addr, fh->fh_s_id);
499 return 0;
500 default:
501 if (fip->state != FIP_ST_ENABLED)
502 goto drop;
503 return 0;
722 } 504 }
505 if (fcoe_ctlr_encaps(fip, op, skb))
506 goto drop;
507 fip->send(fip, skb);
508 return -EINPROGRESS;
509drop:
510 kfree_skb(skb);
511 return -EINVAL;
723} 512}
513EXPORT_SYMBOL(fcoe_ctlr_els_send);
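
A minimal usage sketch of the contract documented above: the caller hands every outgoing ELS to fcoe_ctlr_els_send() and only performs its own FCoE encapsulation when the function returns zero; a non-zero return means FIP has already sent or dropped the skb. The example_fcoe_xmit() helper and the surrounding structure are assumptions for illustration, not part of this patch.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>

/* Sketch only: transmit hook for ELS frames in a hypothetical LLD. */
static int example_els_xmit(struct fcoe_ctlr *fip, struct fc_lport *lp,
			    struct sk_buff *skb)
{
	int rc;

	rc = fcoe_ctlr_els_send(fip, skb);
	if (rc == 0)
		return example_fcoe_xmit(lp, skb); /* hypothetical plain-FCoE path */
	/* non-zero: FIP consumed the skb, either sending it or dropping it */
	return rc == -EINPROGRESS ? 0 : rc;
}
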
724 514
725/** 515/*
726 * fcoe_watchdog() - fcoe timer callback 516 * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller.
727 * @vp: 517 * @fip: FCoE controller.
728 * 518 *
 729 * This checks the pending queue length for fcoe and sets lport qfull 519 * Called with lock held.
730 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
731 * fcoe_hostlist.
732 * 520 *
733 * Returns: 0 for success 521 * An FCF is considered old if we have missed three advertisements.
 522 * That is, there has been no valid advertisement from it for three
523 * times its keep-alive period including fuzz.
524 *
525 * In addition, determine the time when an FCF selection can occur.
734 */ 526 */
735void fcoe_watchdog(ulong vp) 527static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
736{ 528{
737 struct fcoe_softc *fc; 529 struct fcoe_fcf *fcf;
738 530 struct fcoe_fcf *next;
739 read_lock(&fcoe_hostlist_lock); 531 unsigned long sel_time = 0;
740 list_for_each_entry(fc, &fcoe_hostlist, list) { 532
741 if (fc->lp) 533 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
742 fcoe_check_wait_queue(fc->lp); 534 if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
535 msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
536 if (fip->sel_fcf == fcf)
537 fip->sel_fcf = NULL;
538 list_del(&fcf->list);
539 WARN_ON(!fip->fcf_count);
540 fip->fcf_count--;
541 kfree(fcf);
542 } else if (fcoe_ctlr_mtu_valid(fcf) &&
543 (!sel_time || time_before(sel_time, fcf->time))) {
544 sel_time = fcf->time;
545 }
546 }
547 if (sel_time) {
548 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
549 fip->sel_time = sel_time;
550 if (time_before(sel_time, fip->timer.expires))
551 mod_timer(&fip->timer, sel_time);
552 } else {
553 fip->sel_time = 0;
743 } 554 }
744 read_unlock(&fcoe_hostlist_lock);
745
746 fcoe_timer.expires = jiffies + (1 * HZ);
747 add_timer(&fcoe_timer);
748} 555}
749 556
750
751/** 557/**
752 * fcoe_check_wait_queue() - put the skb into fcoe pending xmit queue 558 * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry.
 753 * @lp: the fc_lport for this skb 559 * @skb: received FIP advertisement frame
754 * @skb: the associated skb to be xmitted 560 * @fcf: resulting FCF entry.
755 * 561 *
 756 * This empties the wait_queue, dequeues the head of the wait_queue 562 * Returns zero on a valid parsed advertisement,
 757 * and calls fcoe_start_io() for each packet; if all skbs have been 563 * otherwise returns a non-zero value.
 758 * transmitted it returns qlen, or -1 if an error occurs, then restores the
 759 * wait_queue and tries again later.
760 *
761 * The wait_queue is used when the skb transmit fails. skb will go
762 * in the wait_queue which will be emptied by the time function OR
763 * by the next skb transmit.
764 *
765 * Returns: 0 for success
766 */ 564 */
767static int fcoe_check_wait_queue(struct fc_lport *lp) 565static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
768{ 566{
769 struct fcoe_softc *fc = lport_priv(lp); 567 struct fip_header *fiph;
770 struct sk_buff *skb; 568 struct fip_desc *desc = NULL;
771 int rc = -1; 569 struct fip_wwn_desc *wwn;
772 570 struct fip_fab_desc *fab;
773 spin_lock_bh(&fc->fcoe_pending_queue.lock); 571 struct fip_fka_desc *fka;
774 if (fc->fcoe_pending_queue_active) 572 unsigned long t;
775 goto out; 573 size_t rlen;
776 fc->fcoe_pending_queue_active = 1; 574 size_t dlen;
777 575
778 while (fc->fcoe_pending_queue.qlen) { 576 memset(fcf, 0, sizeof(*fcf));
779 /* keep qlen > 0 until fcoe_start_io succeeds */ 577 fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA);
780 fc->fcoe_pending_queue.qlen++; 578
781 skb = __skb_dequeue(&fc->fcoe_pending_queue); 579 fiph = (struct fip_header *)skb->data;
782 580 fcf->flags = ntohs(fiph->fip_flags);
783 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 581
784 rc = fcoe_start_io(skb); 582 rlen = ntohs(fiph->fip_dl_len) * 4;
785 spin_lock_bh(&fc->fcoe_pending_queue.lock); 583 if (rlen + sizeof(*fiph) > skb->len)
786 584 return -EINVAL;
787 if (rc) { 585
788 __skb_queue_head(&fc->fcoe_pending_queue, skb); 586 desc = (struct fip_desc *)(fiph + 1);
789 /* undo temporary increment above */ 587 while (rlen > 0) {
790 fc->fcoe_pending_queue.qlen--; 588 dlen = desc->fip_dlen * FIP_BPW;
589 if (dlen < sizeof(*desc) || dlen > rlen)
590 return -EINVAL;
591 switch (desc->fip_dtype) {
592 case FIP_DT_PRI:
593 if (dlen != sizeof(struct fip_pri_desc))
594 goto len_err;
595 fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri;
791 break; 596 break;
597 case FIP_DT_MAC:
598 if (dlen != sizeof(struct fip_mac_desc))
599 goto len_err;
600 memcpy(fcf->fcf_mac,
601 ((struct fip_mac_desc *)desc)->fd_mac,
602 ETH_ALEN);
603 if (!is_valid_ether_addr(fcf->fcf_mac)) {
604 FIP_DBG("invalid MAC addr in FIP adv\n");
605 return -EINVAL;
606 }
607 break;
608 case FIP_DT_NAME:
609 if (dlen != sizeof(struct fip_wwn_desc))
610 goto len_err;
611 wwn = (struct fip_wwn_desc *)desc;
612 fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn);
613 break;
614 case FIP_DT_FAB:
615 if (dlen != sizeof(struct fip_fab_desc))
616 goto len_err;
617 fab = (struct fip_fab_desc *)desc;
618 fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn);
619 fcf->vfid = ntohs(fab->fd_vfid);
620 fcf->fc_map = ntoh24(fab->fd_map);
621 break;
622 case FIP_DT_FKA:
623 if (dlen != sizeof(struct fip_fka_desc))
624 goto len_err;
625 fka = (struct fip_fka_desc *)desc;
626 t = ntohl(fka->fd_fka_period);
627 if (t >= FCOE_CTLR_MIN_FKA)
628 fcf->fka_period = msecs_to_jiffies(t);
629 break;
630 case FIP_DT_MAP_OUI:
631 case FIP_DT_FCOE_SIZE:
632 case FIP_DT_FLOGI:
633 case FIP_DT_FDISC:
634 case FIP_DT_LOGO:
635 case FIP_DT_ELP:
636 default:
637 FIP_DBG("unexpected descriptor type %x in FIP adv\n",
638 desc->fip_dtype);
639 /* standard says ignore unknown descriptors >= 128 */
640 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
641 return -EINVAL;
642 continue;
792 } 643 }
793 /* undo temporary increment above */ 644 desc = (struct fip_desc *)((char *)desc + dlen);
794 fc->fcoe_pending_queue.qlen--; 645 rlen -= dlen;
795 } 646 }
647 if (!fcf->fc_map || (fcf->fc_map & 0x10000))
648 return -EINVAL;
649 if (!fcf->switch_name || !fcf->fabric_name)
650 return -EINVAL;
651 return 0;
796 652
797 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 653len_err:
798 lp->qfull = 0; 654 FIP_DBG("FIP length error in descriptor type %x len %zu\n",
799 fc->fcoe_pending_queue_active = 0; 655 desc->fip_dtype, dlen);
800 rc = fc->fcoe_pending_queue.qlen; 656 return -EINVAL;
801out:
802 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
803 return rc;
804} 657}
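
The loop above follows the general FIP descriptor (TLV) rules that both parsers in this file rely on: each descriptor advertises its length in FIP_BPW-byte words, the length must cover at least the descriptor header and must fit in the remaining payload, and unknown types at or above FIP_DT_VENDOR_BASE are skipped while lower unknown types are rejected. A stripped-down sketch of just that validation walk, with no per-type handling, is shown below for illustration only:

#include <linux/errno.h>
#include <linux/in.h>
#include <scsi/fc/fc_fip.h>

/* Sketch: validate the descriptor chain of a linear FIP frame. */
static int example_fip_desc_walk(struct fip_header *fiph, size_t frame_len)
{
	struct fip_desc *desc = (struct fip_desc *)(fiph + 1);
	size_t rlen = ntohs(fiph->fip_dl_len) * FIP_BPW;
	size_t dlen;

	if (rlen + sizeof(*fiph) > frame_len)
		return -EINVAL;
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		if (dlen < sizeof(*desc) || dlen > rlen)
			return -EINVAL;
		/* per-type handling goes here; descriptors with a type at or
		 * above FIP_DT_VENDOR_BASE that are not understood are skipped */
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}
	return 0;
}
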
805 658
806/** 659/**
807 * fcoe_dev_setup() - setup link change notification interface 660 * fcoe_ctlr_recv_adv() - Handle an incoming advertisement.
661 * @fip: FCoE controller.
662 * @skb: Received FIP packet.
808 */ 663 */
809static void fcoe_dev_setup() 664static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
810{ 665{
666 struct fcoe_fcf *fcf;
667 struct fcoe_fcf new;
668 struct fcoe_fcf *found;
669 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
670 int first = 0;
671 int mtu_valid;
672
673 if (fcoe_ctlr_parse_adv(skb, &new))
674 return;
675
676 spin_lock_bh(&fip->lock);
677 first = list_empty(&fip->fcfs);
678 found = NULL;
679 list_for_each_entry(fcf, &fip->fcfs, list) {
680 if (fcf->switch_name == new.switch_name &&
681 fcf->fabric_name == new.fabric_name &&
682 fcf->fc_map == new.fc_map &&
683 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
684 found = fcf;
685 break;
686 }
687 }
688 if (!found) {
689 if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT)
690 goto out;
691
692 fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC);
693 if (!fcf)
694 goto out;
695
696 fip->fcf_count++;
697 memcpy(fcf, &new, sizeof(new));
698 list_add(&fcf->list, &fip->fcfs);
699 } else {
700 /*
701 * Flags in advertisements are ignored once the FCF is
702 * selected. Flags in unsolicited advertisements are
703 * ignored after a usable solicited advertisement
704 * has been received.
705 */
706 if (fcf == fip->sel_fcf) {
707 fip->ctlr_ka_time -= fcf->fka_period;
708 fip->ctlr_ka_time += new.fka_period;
709 if (time_before(fip->ctlr_ka_time, fip->timer.expires))
710 mod_timer(&fip->timer, fip->ctlr_ka_time);
711 } else if (!fcoe_ctlr_fcf_usable(fcf))
712 fcf->flags = new.flags;
713 fcf->fka_period = new.fka_period;
714 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
715 }
716 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
717 fcf->time = jiffies;
718 FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n",
719 found ? "old" : "new",
720 fcf->fabric_name, fcf->fc_map, mtu_valid);
721
811 /* 722 /*
 812 * here set up an interface-specific watchdog timer to 723 * If this advertisement is not solicited and our max receive size
813 * monitor the link state 724 * hasn't been verified, send a solicited advertisement.
814 */ 725 */
815 register_netdevice_notifier(&fcoe_notifier); 726 if (!mtu_valid)
816} 727 fcoe_ctlr_solicit(fip, fcf);
817 728
818/** 729 /*
 819 * fcoe_dev_cleanup() - cleanup link change notification interface 730 * If it's been a while since we did a solicit, and this is
820 */ 731 * the first advertisement we've received, do a multicast
821static void fcoe_dev_cleanup(void) 732 * solicitation to gather as many advertisements as we can
822{ 733 * before selection occurs.
823 unregister_netdevice_notifier(&fcoe_notifier); 734 */
735 if (first && time_after(jiffies, fip->sol_time + sol_tov))
736 fcoe_ctlr_solicit(fip, NULL);
737
738 /*
739 * If this is the first validated FCF, note the time and
740 * set a timer to trigger selection.
741 */
742 if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) {
743 fip->sel_time = jiffies +
744 msecs_to_jiffies(FCOE_CTLR_START_DELAY);
745 if (!timer_pending(&fip->timer) ||
746 time_before(fip->sel_time, fip->timer.expires))
747 mod_timer(&fip->timer, fip->sel_time);
748 }
749out:
750 spin_unlock_bh(&fip->lock);
824} 751}
825 752
826/** 753/**
827 * fcoe_device_notification() - netdev event notification callback 754 * fcoe_ctlr_recv_els() - Handle an incoming FIP-encapsulated ELS frame.
828 * @notifier: context of the notification 755 * @fip: FCoE controller.
829 * @event: type of event 756 * @skb: Received FIP packet.
830 * @ptr: fixed array for output parsed ifname
831 *
832 * This function is called by the ethernet driver in case of link change event
833 *
834 * Returns: 0 for success
835 */ 757 */
836static int fcoe_device_notification(struct notifier_block *notifier, 758static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
837 ulong event, void *ptr)
838{ 759{
839 struct fc_lport *lp = NULL; 760 struct fc_lport *lp = fip->lp;
840 struct net_device *real_dev = ptr; 761 struct fip_header *fiph;
841 struct fcoe_softc *fc; 762 struct fc_frame *fp;
763 struct fc_frame_header *fh = NULL;
764 struct fip_desc *desc;
765 struct fip_encaps *els;
842 struct fcoe_dev_stats *stats; 766 struct fcoe_dev_stats *stats;
843 u32 new_link_up; 767 enum fip_desc_type els_dtype = 0;
844 u32 mfs; 768 u8 els_op;
845 int rc = NOTIFY_OK; 769 u8 sub;
846 770 u8 granted_mac[ETH_ALEN] = { 0 };
847 read_lock(&fcoe_hostlist_lock); 771 size_t els_len = 0;
848 list_for_each_entry(fc, &fcoe_hostlist, list) { 772 size_t rlen;
849 if (fc->real_dev == real_dev) { 773 size_t dlen;
850 lp = fc->lp; 774
775 fiph = (struct fip_header *)skb->data;
776 sub = fiph->fip_subcode;
777 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
778 goto drop;
779
780 rlen = ntohs(fiph->fip_dl_len) * 4;
781 if (rlen + sizeof(*fiph) > skb->len)
782 goto drop;
783
784 desc = (struct fip_desc *)(fiph + 1);
785 while (rlen > 0) {
786 dlen = desc->fip_dlen * FIP_BPW;
787 if (dlen < sizeof(*desc) || dlen > rlen)
788 goto drop;
789 switch (desc->fip_dtype) {
790 case FIP_DT_MAC:
791 if (dlen != sizeof(struct fip_mac_desc))
792 goto len_err;
793 memcpy(granted_mac,
794 ((struct fip_mac_desc *)desc)->fd_mac,
795 ETH_ALEN);
796 if (!is_valid_ether_addr(granted_mac)) {
797 FIP_DBG("invalid MAC addrs in FIP ELS\n");
798 goto drop;
799 }
851 break; 800 break;
801 case FIP_DT_FLOGI:
802 case FIP_DT_FDISC:
803 case FIP_DT_LOGO:
804 case FIP_DT_ELP:
805 if (fh)
806 goto drop;
807 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
808 goto len_err;
809 els_len = dlen - sizeof(*els);
810 els = (struct fip_encaps *)desc;
811 fh = (struct fc_frame_header *)(els + 1);
812 els_dtype = desc->fip_dtype;
813 break;
814 default:
815 FIP_DBG("unexpected descriptor type %x "
816 "in FIP adv\n", desc->fip_dtype);
817 /* standard says ignore unknown descriptors >= 128 */
818 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
819 goto drop;
820 continue;
852 } 821 }
853 } 822 desc = (struct fip_desc *)((char *)desc + dlen);
854 read_unlock(&fcoe_hostlist_lock); 823 rlen -= dlen;
855 if (lp == NULL) {
856 rc = NOTIFY_DONE;
857 goto out;
858 } 824 }
859 825
860 new_link_up = lp->link_up; 826 if (!fh)
861 switch (event) { 827 goto drop;
862 case NETDEV_DOWN: 828 els_op = *(u8 *)(fh + 1);
863 case NETDEV_GOING_DOWN:
864 new_link_up = 0;
865 break;
866 case NETDEV_UP:
867 case NETDEV_CHANGE:
868 new_link_up = !fcoe_link_ok(lp);
869 break;
870 case NETDEV_CHANGEMTU:
871 mfs = fc->real_dev->mtu -
872 (sizeof(struct fcoe_hdr) +
873 sizeof(struct fcoe_crc_eof));
874 if (mfs >= FC_MIN_MAX_FRAME)
875 fc_set_mfs(lp, mfs);
876 new_link_up = !fcoe_link_ok(lp);
877 break;
878 case NETDEV_REGISTER:
879 break;
880 default:
881 FC_DBG("unknown event %ld call", event);
882 }
883 if (lp->link_up != new_link_up) {
884 if (new_link_up)
885 fc_linkup(lp);
886 else {
887 stats = lp->dev_stats[smp_processor_id()];
888 if (stats)
889 stats->LinkFailureCount++;
890 fc_linkdown(lp);
891 fcoe_clean_pending_queue(lp);
892 }
893 }
894out:
895 return rc;
896}
897 829
898/** 830 if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP &&
899 * fcoe_if_to_netdev() - parse a name buffer to get netdev 831 fip->flogi_oxid == ntohs(fh->fh_ox_id) &&
900 * @ifname: fixed array for output parsed ifname 832 els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) {
901 * @buffer: incoming buffer to be copied 833 fip->flogi_oxid = FC_XID_UNKNOWN;
902 * 834 fip->update_mac(fip, fip->data_src_addr, granted_mac);
 903 * Returns: NULL or ptr to the netdev 835 memcpy(fip->data_src_addr, granted_mac, ETH_ALEN);
904 */
905static struct net_device *fcoe_if_to_netdev(const char *buffer)
906{
907 char *cp;
908 char ifname[IFNAMSIZ + 2];
909
910 if (buffer) {
911 strlcpy(ifname, buffer, IFNAMSIZ);
912 cp = ifname + strlen(ifname);
913 while (--cp >= ifname && *cp == '\n')
914 *cp = '\0';
915 return dev_get_by_name(&init_net, ifname);
916 } 836 }
917 return NULL; 837
838 /*
839 * Convert skb into an fc_frame containing only the ELS.
840 */
841 skb_pull(skb, (u8 *)fh - skb->data);
842 skb_trim(skb, els_len);
843 fp = (struct fc_frame *)skb;
844 fc_frame_init(fp);
845 fr_sof(fp) = FC_SOF_I3;
846 fr_eof(fp) = FC_EOF_T;
847 fr_dev(fp) = lp;
848
849 stats = fc_lport_get_stats(lp);
850 stats->RxFrames++;
851 stats->RxWords += skb->len / FIP_BPW;
852
853 fc_exch_recv(lp, lp->emp, fp);
854 return;
855
856len_err:
857 FIP_DBG("FIP length error in descriptor type %x len %zu\n",
858 desc->fip_dtype, dlen);
859drop:
860 kfree_skb(skb);
918} 861}
919 862
920/** 863/**
 921 * fcoe_netdev_to_module_owner() - finds out the NIC driver module of the netdev 864 * fcoe_ctlr_recv_clr_vlink() - Handle an incoming Clear Virtual Link (link reset) frame.
922 * @netdev: the target netdev 865 * @fip: FCoE controller.
866 * @fh: Received FIP header.
923 * 867 *
924 * Returns: ptr to the struct module, NULL for failure 868 * There may be multiple VN_Port descriptors.
869 * The overall length has already been checked.
925 */ 870 */
926static struct module * 871static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
927fcoe_netdev_to_module_owner(const struct net_device *netdev) 872 struct fip_header *fh)
928{ 873{
929 struct device *dev; 874 struct fip_desc *desc;
930 875 struct fip_mac_desc *mp;
931 if (!netdev) 876 struct fip_wwn_desc *wp;
932 return NULL; 877 struct fip_vn_desc *vp;
933 878 size_t rlen;
934 dev = netdev->dev.parent; 879 size_t dlen;
935 if (!dev) 880 struct fcoe_fcf *fcf = fip->sel_fcf;
936 return NULL; 881 struct fc_lport *lp = fip->lp;
882 u32 desc_mask;
883
884 FIP_DBG("Clear Virtual Link received\n");
885 if (!fcf)
886 return;
887 if (!fcf || !fc_host_port_id(lp->host))
888 return;
937 889
938 if (!dev->driver) 890 /*
939 return NULL; 891 * mask of required descriptors. Validating each one clears its bit.
892 */
893 desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
894
895 rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
896 desc = (struct fip_desc *)(fh + 1);
897 while (rlen >= sizeof(*desc)) {
898 dlen = desc->fip_dlen * FIP_BPW;
899 if (dlen > rlen)
900 return;
901 switch (desc->fip_dtype) {
902 case FIP_DT_MAC:
903 mp = (struct fip_mac_desc *)desc;
904 if (dlen < sizeof(*mp))
905 return;
906 if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
907 return;
908 desc_mask &= ~BIT(FIP_DT_MAC);
909 break;
910 case FIP_DT_NAME:
911 wp = (struct fip_wwn_desc *)desc;
912 if (dlen < sizeof(*wp))
913 return;
914 if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
915 return;
916 desc_mask &= ~BIT(FIP_DT_NAME);
917 break;
918 case FIP_DT_VN_ID:
919 vp = (struct fip_vn_desc *)desc;
920 if (dlen < sizeof(*vp))
921 return;
922 if (compare_ether_addr(vp->fd_mac,
923 fip->data_src_addr) == 0 &&
924 get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn &&
925 ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host))
926 desc_mask &= ~BIT(FIP_DT_VN_ID);
927 break;
928 default:
929 /* standard says ignore unknown descriptors >= 128 */
930 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
931 return;
932 break;
933 }
934 desc = (struct fip_desc *)((char *)desc + dlen);
935 rlen -= dlen;
936 }
940 937
941 return dev->driver->owner; 938 /*
939 * reset only if all required descriptors were present and valid.
940 */
941 if (desc_mask) {
942 FIP_DBG("missing descriptors mask %x\n", desc_mask);
943 } else {
944 FIP_DBG("performing Clear Virtual Link\n");
945 fcoe_ctlr_reset(fip, FIP_ST_ENABLED);
946 }
942} 947}
943 948
944/** 949/**
945 * fcoe_ethdrv_get() - Hold the Ethernet driver 950 * fcoe_ctlr_recv() - Receive a FIP frame.
946 * @netdev: the target netdev 951 * @fip: FCoE controller.
952 * @skb: Received FIP packet.
947 * 953 *
948 * Holds the Ethernet driver module by try_module_get() for 954 * This is called from NET_RX_SOFTIRQ.
949 * the corresponding netdev.
950 *
 951 * Returns: 0 for success
952 */ 955 */
953static int fcoe_ethdrv_get(const struct net_device *netdev) 956void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
954{ 957{
955 struct module *owner; 958 spin_lock_bh(&fip->fip_recv_list.lock);
956 959 __skb_queue_tail(&fip->fip_recv_list, skb);
957 owner = fcoe_netdev_to_module_owner(netdev); 960 spin_unlock_bh(&fip->fip_recv_list.lock);
958 if (owner) { 961 schedule_work(&fip->recv_work);
959 printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
960 module_name(owner), netdev->name);
961 return try_module_get(owner);
962 }
963 return -ENODEV;
964} 962}
963EXPORT_SYMBOL(fcoe_ctlr_recv);
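
Since fcoe_ctlr_recv() only queues the frame and defers real processing to recv_work, it can be called straight from an ETH_P_FIP packet handler in softirq context. A sketch of such a hook follows; the example_port container and how a driver locates its fcoe_ctlr are assumptions, not part of this patch.

#include <linux/netdevice.h>
#include <scsi/libfcoe.h>

/* Hypothetical per-interface object embedding the FIP controller. */
struct example_port {
	struct fcoe_ctlr ctlr;
	struct packet_type fip_pt;	/* registered for ETH_P_FIP */
};

static int example_fip_rcv(struct sk_buff *skb, struct net_device *netdev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct example_port *port =
		container_of(ptype, struct example_port, fip_pt);

	fcoe_ctlr_recv(&port->ctlr, skb);	/* consumes the skb */
	return 0;
}
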
965 964
966/** 965/**
967 * fcoe_ethdrv_put() - Release the Ethernet driver 966 * fcoe_ctlr_recv_handler() - Receive a FIP frame.
968 * @netdev: the target netdev 967 * @fip: FCoE controller.
968 * @skb: Received FIP packet.
969 * 969 *
970 * Releases the Ethernet driver module by module_put for 970 * Returns non-zero if the frame is dropped.
971 * the corresponding netdev.
972 *
 973 * Returns: 0 for success 971 */
974 */ 971 */
975static int fcoe_ethdrv_put(const struct net_device *netdev) 972static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
976{ 973{
977 struct module *owner; 974 struct fip_header *fiph;
975 struct ethhdr *eh;
976 enum fip_state state;
977 u16 op;
978 u8 sub;
979
980 if (skb_linearize(skb))
981 goto drop;
982 if (skb->len < sizeof(*fiph))
983 goto drop;
984 eh = eth_hdr(skb);
985 if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
986 compare_ether_addr(eh->h_dest, FIP_ALL_ENODE_MACS))
987 goto drop;
988 fiph = (struct fip_header *)skb->data;
989 op = ntohs(fiph->fip_op);
990 sub = fiph->fip_subcode;
991
992 FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n",
993 FIP_VER_DECAPS(fiph->fip_ver), op, sub,
994 ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags));
995
996 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
997 goto drop;
998 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
999 goto drop;
1000
1001 spin_lock_bh(&fip->lock);
1002 state = fip->state;
1003 if (state == FIP_ST_AUTO) {
1004 fip->map_dest = 0;
1005 fip->state = FIP_ST_ENABLED;
1006 state = FIP_ST_ENABLED;
1007 FIP_DBG("using FIP mode\n");
1008 }
1009 spin_unlock_bh(&fip->lock);
1010 if (state != FIP_ST_ENABLED)
1011 goto drop;
978 1012
979 owner = fcoe_netdev_to_module_owner(netdev); 1013 if (op == FIP_OP_LS) {
980 if (owner) { 1014 fcoe_ctlr_recv_els(fip, skb); /* consumes skb */
981 printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
982 module_name(owner), netdev->name);
983 module_put(owner);
984 return 0; 1015 return 0;
985 } 1016 }
986 return -ENODEV; 1017 if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
1018 fcoe_ctlr_recv_adv(fip, skb);
1019 else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
1020 fcoe_ctlr_recv_clr_vlink(fip, fiph);
1021 kfree_skb(skb);
1022 return 0;
1023drop:
1024 kfree_skb(skb);
1025 return -1;
987} 1026}
988 1027
989/** 1028/**
990 * fcoe_destroy() - handles the destroy from sysfs 1029 * fcoe_ctlr_select() - Select the best FCF, if possible.
 991 * @buffer: expected to be an eth interface name 1030 * @fip: FCoE controller.
992 * @kp: associated kernel param 1031 *
1032 * If there are conflicting advertisements, no FCF can be chosen.
993 * 1033 *
994 * Returns: 0 for success 1034 * Called with lock held.
995 */ 1035 */
996static int fcoe_destroy(const char *buffer, struct kernel_param *kp) 1036static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
997{ 1037{
998 int rc; 1038 struct fcoe_fcf *fcf;
999 struct net_device *netdev; 1039 struct fcoe_fcf *best = NULL;
1000 1040
1001 netdev = fcoe_if_to_netdev(buffer); 1041 list_for_each_entry(fcf, &fip->fcfs, list) {
1002 if (!netdev) { 1042 FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n",
1003 rc = -ENODEV; 1043 fcf->fabric_name, fcf->vfid,
1004 goto out_nodev; 1044 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
1005 } 1045 if (!fcoe_ctlr_fcf_usable(fcf)) {
1006 /* look for existing lport */ 1046 FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n",
1007 if (!fcoe_hostlist_lookup(netdev)) { 1047 fcf->fabric_name, fcf->fc_map,
1008 rc = -ENODEV; 1048 (fcf->flags & FIP_FL_SOL) ? "" : "in",
1009 goto out_putdev; 1049 (fcf->flags & FIP_FL_AVAIL) ? "" : "un");
1010 } 1050 continue;
1011 /* pass to transport */ 1051 }
1012 rc = fcoe_transport_release(netdev); 1052 if (!best) {
1013 if (rc) { 1053 best = fcf;
1014 printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n", 1054 continue;
1015 netdev->name); 1055 }
1016 rc = -EIO; 1056 if (fcf->fabric_name != best->fabric_name ||
1017 goto out_putdev; 1057 fcf->vfid != best->vfid ||
1058 fcf->fc_map != best->fc_map) {
1059 FIP_DBG("conflicting fabric, VFID, or FC-MAP\n");
1060 return;
1061 }
1062 if (fcf->pri < best->pri)
1063 best = fcf;
1018 } 1064 }
1019 fcoe_ethdrv_put(netdev); 1065 fip->sel_fcf = best;
1020 rc = 0;
1021out_putdev:
1022 dev_put(netdev);
1023out_nodev:
1024 return rc;
1025} 1066}
1026 1067
1027/** 1068/**
1028 * fcoe_create() - Handles the create call from sysfs 1069 * fcoe_ctlr_timeout() - FIP timer function.
 1029 * @buffer: expected to be an eth interface name 1070 * @arg: &fcoe_ctlr pointer.
1030 * @kp: associated kernel param
1031 * 1071 *
1032 * Returns: 0 for success 1072 * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives.
1033 */ 1073 */
1034static int fcoe_create(const char *buffer, struct kernel_param *kp) 1074static void fcoe_ctlr_timeout(unsigned long arg)
1035{ 1075{
1036 int rc; 1076 struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
1037 struct net_device *netdev; 1077 struct fcoe_fcf *sel;
1038 1078 struct fcoe_fcf *fcf;
1039 netdev = fcoe_if_to_netdev(buffer); 1079 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
1040 if (!netdev) { 1080 DECLARE_MAC_BUF(buf);
1041 rc = -ENODEV; 1081 u8 send_ctlr_ka;
1042 goto out_nodev; 1082 u8 send_port_ka;
1083
1084 spin_lock_bh(&fip->lock);
1085 if (fip->state == FIP_ST_DISABLED) {
1086 spin_unlock_bh(&fip->lock);
1087 return;
1043 } 1088 }
1044 /* look for existing lport */ 1089
1045 if (fcoe_hostlist_lookup(netdev)) { 1090 fcf = fip->sel_fcf;
1046 rc = -EEXIST; 1091 fcoe_ctlr_age_fcfs(fip);
1047 goto out_putdev; 1092
1093 sel = fip->sel_fcf;
1094 if (!sel && fip->sel_time && time_after_eq(jiffies, fip->sel_time)) {
1095 fcoe_ctlr_select(fip);
1096 sel = fip->sel_fcf;
1097 fip->sel_time = 0;
1048 } 1098 }
1049 fcoe_ethdrv_get(netdev); 1099
1050 1100 if (sel != fcf) {
1051 /* pass to transport */ 1101 fcf = sel; /* the old FCF may have been freed */
1052 rc = fcoe_transport_attach(netdev); 1102 if (sel) {
1053 if (rc) { 1103 printk(KERN_INFO "host%d: FIP selected "
1054 printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n", 1104 "Fibre-Channel Forwarder MAC %s\n",
1055 netdev->name); 1105 fip->lp->host->host_no,
1056 fcoe_ethdrv_put(netdev); 1106 print_mac(buf, sel->fcf_mac));
1057 rc = -EIO; 1107 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
1058 goto out_putdev; 1108 fip->port_ka_time = jiffies +
1109 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1110 fip->ctlr_ka_time = jiffies + sel->fka_period;
1111 fip->link = 1;
1112 } else {
1113 printk(KERN_NOTICE "host%d: "
1114 "FIP Fibre-Channel Forwarder timed out. "
1115 "Starting FCF discovery.\n",
1116 fip->lp->host->host_no);
1117 fip->link = 0;
1118 }
1119 schedule_work(&fip->link_work);
1059 } 1120 }
1060 rc = 0;
1061out_putdev:
1062 dev_put(netdev);
1063out_nodev:
1064 return rc;
1065}
1066 1121
1067module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 1122 send_ctlr_ka = 0;
1068__MODULE_PARM_TYPE(create, "string"); 1123 send_port_ka = 0;
1069MODULE_PARM_DESC(create, "Create fcoe port using net device passed in."); 1124 if (sel) {
1070module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 1125 if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
1071__MODULE_PARM_TYPE(destroy, "string"); 1126 fip->ctlr_ka_time = jiffies + sel->fka_period;
1072MODULE_PARM_DESC(destroy, "Destroy fcoe port"); 1127 send_ctlr_ka = 1;
1128 }
1129 if (time_after(next_timer, fip->ctlr_ka_time))
1130 next_timer = fip->ctlr_ka_time;
1073 1131
1074/** 1132 if (time_after_eq(jiffies, fip->port_ka_time)) {
1075 * fcoe_link_ok() - Check if link is ok for the fc_lport 1133 fip->port_ka_time += jiffies +
1076 * @lp: ptr to the fc_lport 1134 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1077 * 1135 send_port_ka = 1;
1078 * Any permanently-disqualifying conditions have been previously checked.
1079 * This also updates the speed setting, which may change with link for 100/1000.
1080 *
1081 * This function should probably be checking for PAUSE support at some point
1082 * in the future. Currently Per-priority-pause is not determinable using
1083 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1084 *
1085 * Returns: 0 if link is OK for use by FCoE.
1086 *
1087 */
1088int fcoe_link_ok(struct fc_lport *lp)
1089{
1090 struct fcoe_softc *fc = lport_priv(lp);
1091 struct net_device *dev = fc->real_dev;
1092 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1093 int rc = 0;
1094
1095 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1096 dev = fc->phys_dev;
1097 if (dev->ethtool_ops->get_settings) {
1098 dev->ethtool_ops->get_settings(dev, &ecmd);
1099 lp->link_supported_speeds &=
1100 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1101 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1102 SUPPORTED_1000baseT_Full))
1103 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1104 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1105 lp->link_supported_speeds |=
1106 FC_PORTSPEED_10GBIT;
1107 if (ecmd.speed == SPEED_1000)
1108 lp->link_speed = FC_PORTSPEED_1GBIT;
1109 if (ecmd.speed == SPEED_10000)
1110 lp->link_speed = FC_PORTSPEED_10GBIT;
1111 } 1136 }
1112 } else 1137 if (time_after(next_timer, fip->port_ka_time))
1113 rc = -1; 1138 next_timer = fip->port_ka_time;
1139 mod_timer(&fip->timer, next_timer);
1140 } else if (fip->sel_time) {
1141 next_timer = fip->sel_time +
1142 msecs_to_jiffies(FCOE_CTLR_START_DELAY);
1143 mod_timer(&fip->timer, next_timer);
1144 }
1145 spin_unlock_bh(&fip->lock);
1114 1146
1115 return rc; 1147 if (send_ctlr_ka)
1148 fcoe_ctlr_send_keep_alive(fip, 0, fip->ctl_src_addr);
1149 if (send_port_ka)
1150 fcoe_ctlr_send_keep_alive(fip, 1, fip->data_src_addr);
1116} 1151}
1117EXPORT_SYMBOL_GPL(fcoe_link_ok);
1118 1152
1119/** 1153/**
1120 * fcoe_percpu_clean() - Clear the pending skbs for an lport 1154 * fcoe_ctlr_link_work() - worker thread function for link changes.
1121 * @lp: the fc_lport 1155 * @work: pointer to link_work member inside &fcoe_ctlr.
1156 *
1157 * See if the link status has changed and if so, report it.
1158 *
1159 * This is here because fc_linkup() and fc_linkdown() must not
1160 * be called from the timer directly, since they use a mutex.
1122 */ 1161 */
1123void fcoe_percpu_clean(struct fc_lport *lp) 1162static void fcoe_ctlr_link_work(struct work_struct *work)
1124{ 1163{
1125 int idx; 1164 struct fcoe_ctlr *fip;
1126 struct fcoe_percpu_s *pp; 1165 int link;
1127 struct fcoe_rcv_info *fr; 1166 int last_link;
1128 struct sk_buff_head *list; 1167
1129 struct sk_buff *skb, *next; 1168 fip = container_of(work, struct fcoe_ctlr, link_work);
1130 struct sk_buff *head; 1169 spin_lock_bh(&fip->lock);
1131 1170 last_link = fip->last_link;
1132 for (idx = 0; idx < NR_CPUS; idx++) { 1171 link = fip->link;
1133 if (fcoe_percpu[idx]) { 1172 fip->last_link = link;
1134 pp = fcoe_percpu[idx]; 1173 spin_unlock_bh(&fip->lock);
1135 spin_lock_bh(&pp->fcoe_rx_list.lock); 1174
1136 list = &pp->fcoe_rx_list; 1175 if (last_link != link) {
1137 head = list->next; 1176 if (link)
1138 for (skb = head; skb != (struct sk_buff *)list; 1177 fc_linkup(fip->lp);
1139 skb = next) { 1178 else
1140 next = skb->next; 1179 fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT);
1141 fr = fcoe_dev_from_skb(skb);
1142 if (fr->fr_dev == lp) {
1143 __skb_unlink(skb, list);
1144 kfree_skb(skb);
1145 }
1146 }
1147 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1148 }
1149 } 1180 }
1150} 1181}
1151EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
1152 1182
1153/** 1183/**
1154 * fcoe_clean_pending_queue() - Dequeue a skb and free it 1184 * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames.
1155 * @lp: the corresponding fc_lport 1185 * @recv_work: pointer to recv_work member inside &fcoe_ctlr.
1156 *
1157 * Returns: none
1158 */ 1186 */
1159void fcoe_clean_pending_queue(struct fc_lport *lp) 1187static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
1160{ 1188{
1161 struct fcoe_softc *fc = lport_priv(lp); 1189 struct fcoe_ctlr *fip;
1162 struct sk_buff *skb; 1190 struct sk_buff *skb;
1163 1191
1164 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1192 fip = container_of(recv_work, struct fcoe_ctlr, recv_work);
1165 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { 1193 spin_lock_bh(&fip->fip_recv_list.lock);
1166 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1194 while ((skb = __skb_dequeue(&fip->fip_recv_list))) {
1167 kfree_skb(skb); 1195 spin_unlock_bh(&fip->fip_recv_list.lock);
1168 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1196 fcoe_ctlr_recv_handler(fip, skb);
1197 spin_lock_bh(&fip->fip_recv_list.lock);
1169 } 1198 }
1170 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1199 spin_unlock_bh(&fip->fip_recv_list.lock);
1171} 1200}
1172EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1173 1201
1174/** 1202/**
1175 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport 1203 * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request.
1176 * @sht: ptr to the scsi host templ 1204 * @fip: FCoE controller.
1177 * @priv_size: size of private data after fc_lport 1205 * @fp: FC frame.
1206 * @sa: Ethernet source MAC address from received FCoE frame.
1178 * 1207 *
1179 * Returns: ptr to Scsi_Host 1208 * Snoop potential response to FLOGI or even incoming FLOGI.
1180 * TODO: to libfc?
1181 */
1182static inline struct Scsi_Host *
1183libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
1184{
1185 return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
1186}
1187
1188/**
1189 * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc
1190 * @sht: ptr to the scsi host templ
1191 * @priv_size: size of private data after fc_lport
1192 * 1209 *
1193 * Returns: ptr to Scsi_Host 1210 * The caller has checked that we are waiting for login as indicated
1194 */ 1211 * by fip->flogi_oxid != FC_XID_UNKNOWN.
1195struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size) 1212 *
1196{ 1213 * The caller is responsible for freeing the frame.
1197 return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
1198}
1199EXPORT_SYMBOL_GPL(fcoe_host_alloc);
1200
1201/**
1202 * fcoe_reset() - Resets the fcoe
1203 * @shost: shost the reset is from
1204 * 1214 *
1205 * Returns: always 0 1215 * Return non-zero if the frame should not be delivered to libfc.
1206 */ 1216 */
1207int fcoe_reset(struct Scsi_Host *shost) 1217int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1208{ 1218{
1209 struct fc_lport *lport = shost_priv(shost); 1219 struct fc_frame_header *fh;
1210 fc_lport_reset(lport); 1220 u8 op;
1221 u8 mac[ETH_ALEN];
1222
1223 fh = fc_frame_header_get(fp);
1224 if (fh->fh_type != FC_TYPE_ELS)
1225 return 0;
1226
1227 op = fc_frame_payload_op(fp);
1228 if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
1229 fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
1230
1231 spin_lock_bh(&fip->lock);
1232 if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) {
1233 spin_unlock_bh(&fip->lock);
1234 return -EINVAL;
1235 }
1236 fip->state = FIP_ST_NON_FIP;
1237 FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
1238
1239 /*
1240 * FLOGI accepted.
1241 * If the src mac addr is FC_OUI-based, then we mark the
1242 * address_mode flag to use FC_OUI-based Ethernet DA.
1243 * Otherwise we use the FCoE gateway addr
1244 */
1245 if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
1246 fip->map_dest = 1;
1247 } else {
1248 memcpy(fip->dest_addr, sa, ETH_ALEN);
1249 fip->map_dest = 0;
1250 }
1251 fip->flogi_oxid = FC_XID_UNKNOWN;
1252 memcpy(mac, fip->data_src_addr, ETH_ALEN);
1253 fc_fcoe_set_mac(fip->data_src_addr, fh->fh_d_id);
1254 spin_unlock_bh(&fip->lock);
1255
1256 fip->update_mac(fip, mac, fip->data_src_addr);
1257 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
1258 /*
1259 * Save source MAC for point-to-point responses.
1260 */
1261 spin_lock_bh(&fip->lock);
1262 if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
1263 memcpy(fip->dest_addr, sa, ETH_ALEN);
1264 fip->map_dest = 0;
1265 if (fip->state == FIP_ST_NON_FIP)
1266 FIP_DBG("received FLOGI REQ, "
1267 "using non-FIP mode\n");
1268 fip->state = FIP_ST_NON_FIP;
1269 }
1270 spin_unlock_bh(&fip->lock);
1271 }
1211 return 0; 1272 return 0;
1212} 1273}
1213EXPORT_SYMBOL_GPL(fcoe_reset); 1274EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
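
A sketch of where fcoe_ctlr_recv_flogi() fits in an LLD receive path, per the contract above: it is consulted only while an FLOGI is outstanding, and a non-zero return means the frame must not be delivered to libfc (the caller still owns and frees it). The surrounding helper is illustrative, not part of this patch; fc_exch_recv() usage mirrors the receive code elsewhere in this diff.

/* Sketch: snoop pre-FIP FLOGI traffic before handing the ELS to libfc. */
static void example_deliver_els(struct fcoe_ctlr *fip, struct fc_lport *lp,
				struct fc_frame *fp, u8 *sa)
{
	if (fip->flogi_oxid != FC_XID_UNKNOWN &&
	    fcoe_ctlr_recv_flogi(fip, fp, sa)) {
		fc_frame_free(fp);		/* not for libfc */
		return;
	}
	fc_exch_recv(lp, lp->emp, fp);
}
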
1214 1275
1215/** 1276/**
1216 * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. 1277 * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN.
@@ -1254,85 +1315,6 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1254EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); 1315EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
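
Typical use of this helper is to derive both world-wide names from the interface MAC, passing different NAA scheme/port arguments for the node and port name; the scheme values 1 and 2 below reflect common usage elsewhere in the FCoE code and are shown here as an assumption rather than part of this hunk.

/* Sketch: set an lport's WWNN/WWPN from the underlying netdev MAC. */
static void example_set_wwns(struct fc_lport *lp, struct net_device *netdev)
{
	u64 wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
	u64 wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);

	fc_set_wwnn(lp, wwnn);
	fc_set_wwpn(lp, wwpn);
}
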
1255 1316
1256/** 1317/**
1257 * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
1258 * @device: this is currently ptr to net_device
1259 *
1260 * Returns: NULL or the located fcoe_softc
1261 */
1262static struct fcoe_softc *
1263fcoe_hostlist_lookup_softc(const struct net_device *dev)
1264{
1265 struct fcoe_softc *fc;
1266
1267 read_lock(&fcoe_hostlist_lock);
1268 list_for_each_entry(fc, &fcoe_hostlist, list) {
1269 if (fc->real_dev == dev) {
1270 read_unlock(&fcoe_hostlist_lock);
1271 return fc;
1272 }
1273 }
1274 read_unlock(&fcoe_hostlist_lock);
1275 return NULL;
1276}
1277
1278/**
1279 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1280 * @netdev: ptr to net_device
1281 *
 1282 * Returns: NULL or the located fc_lport
1283 */
1284struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1285{
1286 struct fcoe_softc *fc;
1287
1288 fc = fcoe_hostlist_lookup_softc(netdev);
1289
1290 return (fc) ? fc->lp : NULL;
1291}
1292EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
1293
1294/**
1295 * fcoe_hostlist_add() - Add a lport to lports list
 1296 * @lp: ptr to the fc_lport to be added
1297 *
1298 * Returns: 0 for success
1299 */
1300int fcoe_hostlist_add(const struct fc_lport *lp)
1301{
1302 struct fcoe_softc *fc;
1303
1304 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1305 if (!fc) {
1306 fc = lport_priv(lp);
1307 write_lock_bh(&fcoe_hostlist_lock);
1308 list_add_tail(&fc->list, &fcoe_hostlist);
1309 write_unlock_bh(&fcoe_hostlist_lock);
1310 }
1311 return 0;
1312}
1313EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
1314
1315/**
1316 * fcoe_hostlist_remove() - remove a lport from lports list
 1317 * @lp: ptr to the fc_lport to be removed
1318 *
1319 * Returns: 0 for success
1320 */
1321int fcoe_hostlist_remove(const struct fc_lport *lp)
1322{
1323 struct fcoe_softc *fc;
1324
1325 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1326 BUG_ON(!fc);
1327 write_lock_bh(&fcoe_hostlist_lock);
1328 list_del(&fc->list);
1329 write_unlock_bh(&fcoe_hostlist_lock);
1330
1331 return 0;
1332}
1333EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
1334
1335/**
1336 * fcoe_libfc_config() - sets up libfc related properties for lport 1318 * fcoe_libfc_config() - sets up libfc related properties for lport
1337 * @lp: ptr to the fc_lport 1319 * @lp: ptr to the fc_lport
1338 * @tt: libfc function template 1320 * @tt: libfc function template
@@ -1354,121 +1336,3 @@ int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
1354 return 0; 1336 return 0;
1355} 1337}
1356EXPORT_SYMBOL_GPL(fcoe_libfc_config); 1338EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1357
1358/**
1359 * fcoe_init() - fcoe module loading initialization
1360 *
1361 * Initialization routine
1362 * 1. Will create fc transport software structure
 1363 * 2. initialize the linked list of port information structures
1364 *
1365 * Returns 0 on success, negative on failure
1366 */
1367static int __init fcoe_init(void)
1368{
1369 int cpu;
1370 struct fcoe_percpu_s *p;
1371
1372
1373 INIT_LIST_HEAD(&fcoe_hostlist);
1374 rwlock_init(&fcoe_hostlist_lock);
1375
1376#ifdef CONFIG_HOTPLUG_CPU
1377 register_cpu_notifier(&fcoe_cpu_notifier);
1378#endif /* CONFIG_HOTPLUG_CPU */
1379
1380 /*
1381 * initialize per CPU interrupt thread
1382 */
1383 for_each_online_cpu(cpu) {
1384 p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
1385 if (p) {
1386 p->thread = kthread_create(fcoe_percpu_receive_thread,
1387 (void *)p,
1388 "fcoethread/%d", cpu);
1389
1390 /*
1391 * if there is no error then bind the thread to the cpu
1392 * initialize the semaphore and skb queue head
1393 */
1394 if (likely(!IS_ERR(p->thread))) {
1395 p->cpu = cpu;
1396 fcoe_percpu[cpu] = p;
1397 skb_queue_head_init(&p->fcoe_rx_list);
1398 kthread_bind(p->thread, cpu);
1399 wake_up_process(p->thread);
1400 } else {
1401 fcoe_percpu[cpu] = NULL;
1402 kfree(p);
1403 }
1404 }
1405 }
1406
1407 /*
1408 * setup link change notification
1409 */
1410 fcoe_dev_setup();
1411
1412 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1413
1414 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1415
 1416 /* initialize the fcoe transport */
1417 fcoe_transport_init();
1418
1419 fcoe_sw_init();
1420
1421 return 0;
1422}
1423module_init(fcoe_init);
1424
1425/**
1426 * fcoe_exit() - fcoe module unloading cleanup
1427 *
1428 * Returns 0 on success, negative on failure
1429 */
1430static void __exit fcoe_exit(void)
1431{
1432 u32 idx;
1433 struct fcoe_softc *fc, *tmp;
1434 struct fcoe_percpu_s *p;
1435 struct sk_buff *skb;
1436
1437 /*
1438 * Stop all call back interfaces
1439 */
1440#ifdef CONFIG_HOTPLUG_CPU
1441 unregister_cpu_notifier(&fcoe_cpu_notifier);
1442#endif /* CONFIG_HOTPLUG_CPU */
1443 fcoe_dev_cleanup();
1444
1445 /*
1446 * stop timer
1447 */
1448 del_timer_sync(&fcoe_timer);
1449
1450 /* releases the associated fcoe transport for each lport */
1451 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1452 fcoe_transport_release(fc->real_dev);
1453
1454 for (idx = 0; idx < NR_CPUS; idx++) {
1455 if (fcoe_percpu[idx]) {
1456 kthread_stop(fcoe_percpu[idx]->thread);
1457 p = fcoe_percpu[idx];
1458 spin_lock_bh(&p->fcoe_rx_list.lock);
1459 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1460 kfree_skb(skb);
1461 spin_unlock_bh(&p->fcoe_rx_list.lock);
1462 if (fcoe_percpu[idx]->crc_eof_page)
1463 put_page(fcoe_percpu[idx]->crc_eof_page);
1464 kfree(fcoe_percpu[idx]);
1465 }
1466 }
1467
 1468 /* remove sw transport */
1469 fcoe_sw_exit();
1470
1471 /* detach the transport */
1472 fcoe_transport_exit();
1473}
1474module_exit(fcoe_exit);