Diffstat (limited to 'drivers/scsi/fnic')
-rw-r--r--  drivers/scsi/fnic/fnic.h       |  25
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c   | 499
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c   |  18
-rw-r--r--  drivers/scsi/fnic/fnic_main.c  | 104
-rw-r--r--  drivers/scsi/fnic/fnic_res.c   |   5
-rw-r--r--  drivers/scsi/fnic/fnic_res.h   |  54
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c  |  96
-rw-r--r--  drivers/scsi/fnic/vnic_scsi.h  |   1
8 files changed, 414 insertions(+), 388 deletions(-)
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e4c0a3d7d87b..bb208a6091e7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -22,6 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <scsi/libfc.h> 24#include <scsi/libfc.h>
25#include <scsi/libfcoe.h>
25#include "fnic_io.h" 26#include "fnic_io.h"
26#include "fnic_res.h" 27#include "fnic_res.h"
27#include "vnic_dev.h" 28#include "vnic_dev.h"
@@ -44,7 +45,7 @@
44#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ 45#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
45#define FNIC_DFLT_QUEUE_DEPTH 32 46#define FNIC_DFLT_QUEUE_DEPTH 32
46#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ 47#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
47 48#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */
48/* 49/*
49 * Tag bits used for special requests. 50 * Tag bits used for special requests.
50 */ 51 */
@@ -145,6 +146,7 @@ struct mempool;
145/* Per-instance private data structure */ 146/* Per-instance private data structure */
146struct fnic { 147struct fnic {
147 struct fc_lport *lport; 148 struct fc_lport *lport;
149 struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
148 struct vnic_dev_bar bar0; 150 struct vnic_dev_bar bar0;
149 151
150 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; 152 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
@@ -162,23 +164,16 @@ struct fnic {
162 unsigned int wq_count; 164 unsigned int wq_count;
163 unsigned int cq_count; 165 unsigned int cq_count;
164 166
165 u32 fcoui_mode:1; /* use fcoui address*/
166 u32 vlan_hw_insert:1; /* let hw insert the tag */ 167 u32 vlan_hw_insert:1; /* let hw insert the tag */
167 u32 in_remove:1; /* fnic device in removal */ 168 u32 in_remove:1; /* fnic device in removal */
168 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ 169 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
169 170
170 struct completion *remove_wait; /* device remove thread blocks */ 171 struct completion *remove_wait; /* device remove thread blocks */
171 172
172 struct fc_frame *flogi;
173 struct fc_frame *flogi_resp;
174 u16 flogi_oxid;
175 unsigned long s_id;
176 enum fnic_state state; 173 enum fnic_state state;
177 spinlock_t fnic_lock; 174 spinlock_t fnic_lock;
178 175
179 u16 vlan_id; /* VLAN tag including priority */ 176 u16 vlan_id; /* VLAN tag including priority */
180 u8 mac_addr[ETH_ALEN];
181 u8 dest_addr[ETH_ALEN];
182 u8 data_src_addr[ETH_ALEN]; 177 u8 data_src_addr[ETH_ALEN];
183 u64 fcp_input_bytes; /* internal statistic */ 178 u64 fcp_input_bytes; /* internal statistic */
184 u64 fcp_output_bytes; /* internal statistic */ 179 u64 fcp_output_bytes; /* internal statistic */
@@ -205,6 +200,7 @@ struct fnic {
205 struct work_struct link_work; 200 struct work_struct link_work;
206 struct work_struct frame_work; 201 struct work_struct frame_work;
207 struct sk_buff_head frame_queue; 202 struct sk_buff_head frame_queue;
203 struct sk_buff_head tx_queue;
208 204
209 /* copy work queue cache line section */ 205 /* copy work queue cache line section */
210 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; 206 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
@@ -224,6 +220,11 @@ struct fnic {
224 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; 220 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
225}; 221};
226 222
223static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
224{
225 return container_of(fip, struct fnic, ctlr);
226}
227
227extern struct workqueue_struct *fnic_event_queue; 228extern struct workqueue_struct *fnic_event_queue;
228extern struct device_attribute *fnic_attrs[]; 229extern struct device_attribute *fnic_attrs[];
229 230
@@ -239,7 +240,11 @@ void fnic_handle_link(struct work_struct *work);
239int fnic_rq_cmpl_handler(struct fnic *fnic, int); 240int fnic_rq_cmpl_handler(struct fnic *fnic, int);
240int fnic_alloc_rq_frame(struct vnic_rq *rq); 241int fnic_alloc_rq_frame(struct vnic_rq *rq);
241void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); 242void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
242int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); 243void fnic_flush_tx(struct fnic *);
244void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
245void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
246void fnic_update_mac(struct fc_lport *, u8 *new);
247void fnic_update_mac_locked(struct fnic *, u8 *new);
243 248
244int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); 249int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
245int fnic_abort_cmd(struct scsi_cmnd *); 250int fnic_abort_cmd(struct scsi_cmnd *);
@@ -252,7 +257,7 @@ void fnic_empty_scsi_cleanup(struct fc_lport *);
252void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); 257void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
253int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); 258int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
254int fnic_wq_cmpl_handler(struct fnic *fnic, int); 259int fnic_wq_cmpl_handler(struct fnic *fnic, int);
255int fnic_flogi_reg_handler(struct fnic *fnic); 260int fnic_flogi_reg_handler(struct fnic *fnic, u32);
256void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, 261void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
257 struct fcpio_host_req *desc); 262 struct fcpio_host_req *desc);
258int fnic_fw_reset_handler(struct fnic *fnic); 263int fnic_fw_reset_handler(struct fnic *fnic);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 50db3e36a619..54f8d0e5407f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -23,6 +23,7 @@
23#include <linux/if_ether.h> 23#include <linux/if_ether.h>
24#include <linux/if_vlan.h> 24#include <linux/if_vlan.h>
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include <scsi/fc/fc_fip.h>
26#include <scsi/fc/fc_els.h> 27#include <scsi/fc/fc_els.h>
27#include <scsi/fc/fc_fcoe.h> 28#include <scsi/fc/fc_fcoe.h>
28#include <scsi/fc_frame.h> 29#include <scsi/fc_frame.h>
@@ -34,6 +35,8 @@
34 35
35struct workqueue_struct *fnic_event_queue; 36struct workqueue_struct *fnic_event_queue;
36 37
38static void fnic_set_eth_mode(struct fnic *);
39
37void fnic_handle_link(struct work_struct *work) 40void fnic_handle_link(struct work_struct *work)
38{ 41{
39 struct fnic *fnic = container_of(work, struct fnic, link_work); 42 struct fnic *fnic = container_of(work, struct fnic, link_work);
@@ -64,10 +67,10 @@ void fnic_handle_link(struct work_struct *work)
64 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 67 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
65 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
66 "link down\n"); 69 "link down\n");
67 fc_linkdown(fnic->lport); 70 fcoe_ctlr_link_down(&fnic->ctlr);
68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 71 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
69 "link up\n"); 72 "link up\n");
70 fc_linkup(fnic->lport); 73 fcoe_ctlr_link_up(&fnic->ctlr);
71 } else 74 } else
72 /* UP -> UP */ 75 /* UP -> UP */
73 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 76 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -76,13 +79,13 @@ void fnic_handle_link(struct work_struct *work)
76 /* DOWN -> UP */ 79 /* DOWN -> UP */
77 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 80 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
78 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); 81 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
79 fc_linkup(fnic->lport); 82 fcoe_ctlr_link_up(&fnic->ctlr);
80 } else { 83 } else {
81 /* UP -> DOWN */ 84 /* UP -> DOWN */
82 fnic->lport->host_stats.link_failure_count++; 85 fnic->lport->host_stats.link_failure_count++;
83 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 86 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
84 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); 87 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
85 fc_linkdown(fnic->lport); 88 fcoe_ctlr_link_down(&fnic->ctlr);
86 } 89 }
87 90
88} 91}
@@ -107,197 +110,179 @@ void fnic_handle_frame(struct work_struct *work)
107 return; 110 return;
108 } 111 }
109 fp = (struct fc_frame *)skb; 112 fp = (struct fc_frame *)skb;
110 /* if Flogi resp frame, register the address */ 113
111 if (fr_flags(fp)) { 114 /*
112 vnic_dev_add_addr(fnic->vdev, 115 * If we're in a transitional state, just re-queue and return.
113 fnic->data_src_addr); 116 * The queue will be serviced when we get to a stable state.
114 fr_flags(fp) = 0; 117 */
118 if (fnic->state != FNIC_IN_FC_MODE &&
119 fnic->state != FNIC_IN_ETH_MODE) {
120 skb_queue_head(&fnic->frame_queue, skb);
121 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
122 return;
115 } 123 }
116 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 124 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
117 125
118 fc_exch_recv(lp, fp); 126 fc_exch_recv(lp, fp);
119 } 127 }
120
121}
122
123static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
124 u32 len, u8 sof, u8 eof)
125{
126 struct fc_frame *fp = (struct fc_frame *)skb;
127
128 skb_trim(skb, len);
129 fr_eof(fp) = eof;
130 fr_sof(fp) = sof;
131} 128}
132 129
133 130/**
134static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) 131 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
132 * @fnic: fnic instance.
133 * @skb: Ethernet Frame.
134 */
135static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
135{ 136{
136 struct fc_frame *fp; 137 struct fc_frame *fp;
137 struct ethhdr *eh; 138 struct ethhdr *eh;
138 struct vlan_ethhdr *vh;
139 struct fcoe_hdr *fcoe_hdr; 139 struct fcoe_hdr *fcoe_hdr;
140 struct fcoe_crc_eof *ft; 140 struct fcoe_crc_eof *ft;
141 u32 transport_len = 0;
142 141
142 /*
143 * Undo VLAN encapsulation if present.
144 */
143 eh = (struct ethhdr *)skb->data; 145 eh = (struct ethhdr *)skb->data;
144 vh = (struct vlan_ethhdr *)skb->data; 146 if (eh->h_proto == htons(ETH_P_8021Q)) {
145 if (vh->h_vlan_proto == htons(ETH_P_8021Q) && 147 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
146 vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { 148 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
147 skb_pull(skb, sizeof(struct vlan_ethhdr)); 149 skb_reset_mac_header(skb);
148 transport_len += sizeof(struct vlan_ethhdr); 150 }
149 } else if (eh->h_proto == htons(ETH_P_FCOE)) { 151 if (eh->h_proto == htons(ETH_P_FIP)) {
150 transport_len += sizeof(struct ethhdr); 152 skb_pull(skb, sizeof(*eh));
151 skb_pull(skb, sizeof(struct ethhdr)); 153 fcoe_ctlr_recv(&fnic->ctlr, skb);
152 } else 154 return 1; /* let caller know packet was used */
153 return -1; 155 }
156 if (eh->h_proto != htons(ETH_P_FCOE))
157 goto drop;
158 skb_set_network_header(skb, sizeof(*eh));
159 skb_pull(skb, sizeof(*eh));
154 160
155 fcoe_hdr = (struct fcoe_hdr *)skb->data; 161 fcoe_hdr = (struct fcoe_hdr *)skb->data;
156 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) 162 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
157 return -1; 163 goto drop;
158 164
159 fp = (struct fc_frame *)skb; 165 fp = (struct fc_frame *)skb;
160 fc_frame_init(fp); 166 fc_frame_init(fp);
161 fr_sof(fp) = fcoe_hdr->fcoe_sof; 167 fr_sof(fp) = fcoe_hdr->fcoe_sof;
162 skb_pull(skb, sizeof(struct fcoe_hdr)); 168 skb_pull(skb, sizeof(struct fcoe_hdr));
163 transport_len += sizeof(struct fcoe_hdr); 169 skb_reset_transport_header(skb);
164 170
165 ft = (struct fcoe_crc_eof *)(skb->data + len - 171 ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
166 transport_len - sizeof(*ft));
167 fr_eof(fp) = ft->fcoe_eof; 172 fr_eof(fp) = ft->fcoe_eof;
168 skb_trim(skb, len - transport_len - sizeof(*ft)); 173 skb_trim(skb, skb->len - sizeof(*ft));
169 return 0; 174 return 0;
175drop:
176 dev_kfree_skb_irq(skb);
177 return -1;
170} 178}
171 179
172static inline int fnic_handle_flogi_resp(struct fnic *fnic, 180/**
173 struct fc_frame *fp) 181 * fnic_update_mac_locked() - set data MAC address and filters.
182 * @fnic: fnic instance.
183 * @new: newly-assigned FCoE MAC address.
184 *
185 * Called with the fnic lock held.
186 */
187void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
174{ 188{
175 u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; 189 u8 *ctl = fnic->ctlr.ctl_src_addr;
176 struct ethhdr *eth_hdr; 190 u8 *data = fnic->data_src_addr;
177 struct fc_frame_header *fh;
178 int ret = 0;
179 unsigned long flags;
180 struct fc_frame *old_flogi_resp = NULL;
181 191
182 fh = (struct fc_frame_header *)fr_hdr(fp); 192 if (is_zero_ether_addr(new))
193 new = ctl;
194 if (!compare_ether_addr(data, new))
195 return;
196 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
197 if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
198 vnic_dev_del_addr(fnic->vdev, data);
199 memcpy(data, new, ETH_ALEN);
200 if (compare_ether_addr(new, ctl))
201 vnic_dev_add_addr(fnic->vdev, new);
202}
183 203
184 spin_lock_irqsave(&fnic->fnic_lock, flags); 204/**
205 * fnic_update_mac() - set data MAC address and filters.
206 * @lport: local port.
207 * @new: newly-assigned FCoE MAC address.
208 */
209void fnic_update_mac(struct fc_lport *lport, u8 *new)
210{
211 struct fnic *fnic = lport_priv(lport);
185 212
186 if (fnic->state == FNIC_IN_ETH_MODE) { 213 spin_lock_irq(&fnic->fnic_lock);
214 fnic_update_mac_locked(fnic, new);
215 spin_unlock_irq(&fnic->fnic_lock);
216}
187 217
188 /* 218/**
189 * Check if oxid matches on taking the lock. A new Flogi 219 * fnic_set_port_id() - set the port_ID after successful FLOGI.
190 * issued by libFC might have changed the fnic cached oxid 220 * @lport: local port.
191 */ 221 * @port_id: assigned FC_ID.
192 if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { 222 * @fp: received frame containing the FLOGI accept or NULL.
193 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 223 *
194 "Flogi response oxid not" 224 * This is called from libfc when a new FC_ID has been assigned.
195 " matching cached oxid, dropping frame" 225 * This causes us to reset the firmware to FC_MODE and setup the new MAC
196 "\n"); 226 * address and FC_ID.
197 ret = -1; 227 *
198 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 228 * It is also called with FC_ID 0 when we're logged off.
199 dev_kfree_skb_irq(fp_skb(fp)); 229 *
200 goto handle_flogi_resp_end; 230 * If the FC_ID is due to point-to-point, fp may be NULL.
201 } 231 */
232void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
233{
234 struct fnic *fnic = lport_priv(lport);
235 u8 *mac;
236 int ret;
202 237
203 /* Drop older cached flogi response frame, cache this frame */ 238 FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
204 old_flogi_resp = fnic->flogi_resp; 239 port_id, fp);
205 fnic->flogi_resp = fp;
206 fnic->flogi_oxid = FC_XID_UNKNOWN;
207 240
208 /* 241 /*
209 * this frame is part of flogi get the src mac addr from this 242 * If we're clearing the FC_ID, change to use the ctl_src_addr.
210 * frame if the src mac is fcoui based then we mark the 243 * Set ethernet mode to send FLOGI.
211 * address mode flag to use fcoui base for dst mac addr 244 */
212 * otherwise we have to store the fcoe gateway addr 245 if (!port_id) {
213 */ 246 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
214 eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); 247 fnic_set_eth_mode(fnic);
215 memcpy(mac, eth_hdr->h_source, ETH_ALEN); 248 return;
249 }
216 250
217 if (ntoh24(mac) == FC_FCOE_OUI) 251 if (fp) {
218 fnic->fcoui_mode = 1; 252 mac = fr_cb(fp)->granted_mac;
219 else { 253 if (is_zero_ether_addr(mac)) {
220 fnic->fcoui_mode = 0; 254 /* non-FIP - FLOGI already accepted - ignore return */
221 memcpy(fnic->dest_addr, mac, ETH_ALEN); 255 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
222 } 256 }
257 fnic_update_mac(lport, mac);
258 }
223 259
224 /* 260 /* Change state to reflect transition to FC mode */
225 * Except for Flogi frame, all outbound frames from us have the 261 spin_lock_irq(&fnic->fnic_lock);
226 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses 262 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
227 * the vnic MAC address as the Eth Src address
228 */
229 fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
230
231 /* We get our s_id from the d_id of the flogi resp frame */
232 fnic->s_id = ntoh24(fh->fh_d_id);
233
234 /* Change state to reflect transition from Eth to FC mode */
235 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; 263 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
236 264 else {
237 } else {
238 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 265 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
239 "Unexpected fnic state %s while" 266 "Unexpected fnic state %s while"
240 " processing flogi resp\n", 267 " processing flogi resp\n",
241 fnic_state_to_str(fnic->state)); 268 fnic_state_to_str(fnic->state));
242 ret = -1; 269 spin_unlock_irq(&fnic->fnic_lock);
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 270 return;
244 dev_kfree_skb_irq(fp_skb(fp));
245 goto handle_flogi_resp_end;
246 } 271 }
247 272 spin_unlock_irq(&fnic->fnic_lock);
248 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
249
250 /* Drop older cached frame */
251 if (old_flogi_resp)
252 dev_kfree_skb_irq(fp_skb(old_flogi_resp));
253 273
254 /* 274 /*
255 * send flogi reg request to firmware, this will put the fnic in 275 * Send FLOGI registration to firmware to set up FC mode.
256 * in FC mode 276 * The new address will be set up when registration completes.
257 */ 277 */
258 ret = fnic_flogi_reg_handler(fnic); 278 ret = fnic_flogi_reg_handler(fnic, port_id);
259 279
260 if (ret < 0) { 280 if (ret < 0) {
261 int free_fp = 1; 281 spin_lock_irq(&fnic->fnic_lock);
262 spin_lock_irqsave(&fnic->fnic_lock, flags);
263 /*
264 * free the frame is some other thread is not
265 * pointing to it
266 */
267 if (fnic->flogi_resp != fp)
268 free_fp = 0;
269 else
270 fnic->flogi_resp = NULL;
271
272 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) 282 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
273 fnic->state = FNIC_IN_ETH_MODE; 283 fnic->state = FNIC_IN_ETH_MODE;
274 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 284 spin_unlock_irq(&fnic->fnic_lock);
275 if (free_fp)
276 dev_kfree_skb_irq(fp_skb(fp));
277 } 285 }
278
279 handle_flogi_resp_end:
280 return ret;
281}
282
283/* Returns 1 for a response that matches cached flogi oxid */
284static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
285 struct fc_frame *fp)
286{
287 struct fc_frame_header *fh;
288 int ret = 0;
289 u32 f_ctl;
290
291 fh = fc_frame_header_get(fp);
292 f_ctl = ntoh24(fh->fh_f_ctl);
293
294 if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
295 fh->fh_r_ctl == FC_RCTL_ELS_REP &&
296 (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
297 fh->fh_type == FC_TYPE_ELS)
298 ret = 1;
299
300 return ret;
301} 286}
302 287
303static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc 288static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
@@ -326,6 +311,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
326 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, 311 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
327 PCI_DMA_FROMDEVICE); 312 PCI_DMA_FROMDEVICE);
328 skb = buf->os_buf; 313 skb = buf->os_buf;
314 fp = (struct fc_frame *)skb;
329 buf->os_buf = NULL; 315 buf->os_buf = NULL;
330 316
331 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); 317 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
@@ -338,6 +324,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
338 &fcoe_enc_error, &fcs_ok, &vlan_stripped, 324 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
339 &vlan); 325 &vlan);
340 eth_hdrs_stripped = 1; 326 eth_hdrs_stripped = 1;
327 skb_trim(skb, fcp_bytes_written);
328 fr_sof(fp) = sof;
329 fr_eof(fp) = eof;
341 330
342 } else if (type == CQ_DESC_TYPE_RQ_ENET) { 331 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
343 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, 332 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
@@ -352,6 +341,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
352 &ipv4_csum_ok, &ipv6, &ipv4, 341 &ipv4_csum_ok, &ipv6, &ipv4,
353 &ipv4_fragment, &fcs_ok); 342 &ipv4_fragment, &fcs_ok);
354 eth_hdrs_stripped = 0; 343 eth_hdrs_stripped = 0;
344 skb_trim(skb, bytes_written);
345 if (!fcs_ok) {
346 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
347 "fcs error. dropping packet.\n");
348 goto drop;
349 }
350 if (fnic_import_rq_eth_pkt(fnic, skb))
351 return;
355 352
356 } else { 353 } else {
357 /* wrong CQ type*/ 354 /* wrong CQ type*/
@@ -370,43 +367,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
370 goto drop; 367 goto drop;
371 } 368 }
372 369
373 if (eth_hdrs_stripped)
374 fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
375 else if (fnic_import_rq_eth_pkt(skb, bytes_written))
376 goto drop;
377
378 fp = (struct fc_frame *)skb;
379
380 /*
381 * If frame is an ELS response that matches the cached FLOGI OX_ID,
382 * and is accept, issue flogi_reg_request copy wq request to firmware
383 * to register the S_ID and determine whether FC_OUI mode or GW mode.
384 */
385 if (is_matching_flogi_resp_frame(fnic, fp)) {
386 if (!eth_hdrs_stripped) {
387 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
388 fnic_handle_flogi_resp(fnic, fp);
389 return;
390 }
391 /*
392 * Recd. Flogi reject. No point registering
393 * with fw, but forward to libFC
394 */
395 goto forward;
396 }
397 goto drop;
398 }
399 if (!eth_hdrs_stripped)
400 goto drop;
401
402forward:
403 spin_lock_irqsave(&fnic->fnic_lock, flags); 370 spin_lock_irqsave(&fnic->fnic_lock, flags);
404 if (fnic->stop_rx_link_events) { 371 if (fnic->stop_rx_link_events) {
405 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 372 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
406 goto drop; 373 goto drop;
407 } 374 }
408 /* Use fr_flags to indicate whether succ. flogi resp or not */
409 fr_flags(fp) = 0;
410 fr_dev(fp) = fnic->lport; 375 fr_dev(fp) = fnic->lport;
411 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 376 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
412 377
@@ -494,12 +459,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
494 buf->os_buf = NULL; 459 buf->os_buf = NULL;
495} 460}
496 461
497static inline int is_flogi_frame(struct fc_frame_header *fh) 462/**
463 * fnic_eth_send() - Send Ethernet frame.
464 * @fip: fcoe_ctlr instance.
465 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
466 */
467void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
498{ 468{
499 return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; 469 struct fnic *fnic = fnic_from_ctlr(fip);
470 struct vnic_wq *wq = &fnic->wq[0];
471 dma_addr_t pa;
472 struct ethhdr *eth_hdr;
473 struct vlan_ethhdr *vlan_hdr;
474 unsigned long flags;
475
476 if (!fnic->vlan_hw_insert) {
477 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
478 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
479 sizeof(*vlan_hdr) - sizeof(*eth_hdr));
480 memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
481 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
482 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
483 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
484 }
485
486 pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
487
488 spin_lock_irqsave(&fnic->wq_lock[0], flags);
489 if (!vnic_wq_desc_avail(wq)) {
490 pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
491 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
492 kfree_skb(skb);
493 return;
494 }
495
496 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
497 fnic->vlan_hw_insert, fnic->vlan_id, 1);
498 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
500} 499}
501 500
502int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) 501/*
502 * Send FC frame.
503 */
504static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
503{ 505{
504 struct vnic_wq *wq = &fnic->wq[0]; 506 struct vnic_wq *wq = &fnic->wq[0];
505 struct sk_buff *skb; 507 struct sk_buff *skb;
@@ -515,6 +517,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
515 fh = fc_frame_header_get(fp); 517 fh = fc_frame_header_get(fp);
516 skb = fp_skb(fp); 518 skb = fp_skb(fp);
517 519
520 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
521 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
522 return 0;
523
518 if (!fnic->vlan_hw_insert) { 524 if (!fnic->vlan_hw_insert) {
519 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); 525 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
520 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); 526 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
@@ -530,16 +536,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
530 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); 536 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
531 } 537 }
532 538
533 if (is_flogi_frame(fh)) { 539 if (fnic->ctlr.map_dest)
534 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); 540 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
535 memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); 541 else
536 } else { 542 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
537 if (fnic->fcoui_mode) 543 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
538 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
539 else
540 memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
541 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
542 }
543 544
544 tot_len = skb->len; 545 tot_len = skb->len;
545 BUG_ON(tot_len % 4); 546 BUG_ON(tot_len % 4);
@@ -578,109 +579,85 @@ fnic_send_frame_end:
578int fnic_send(struct fc_lport *lp, struct fc_frame *fp) 579int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
579{ 580{
580 struct fnic *fnic = lport_priv(lp); 581 struct fnic *fnic = lport_priv(lp);
581 struct fc_frame_header *fh;
582 int ret = 0;
583 enum fnic_state old_state;
584 unsigned long flags; 582 unsigned long flags;
585 struct fc_frame *old_flogi = NULL;
586 struct fc_frame *old_flogi_resp = NULL;
587 583
588 if (fnic->in_remove) { 584 if (fnic->in_remove) {
589 dev_kfree_skb(fp_skb(fp)); 585 dev_kfree_skb(fp_skb(fp));
590 ret = -1; 586 return -1;
591 goto fnic_send_end;
592 } 587 }
593 588
594 fh = fc_frame_header_get(fp); 589 /*
595 /* if not an Flogi frame, send it out, this is the common case */ 590 * Queue frame if in a transitional state.
596 if (!is_flogi_frame(fh)) 591 * This occurs while registering the Port_ID / MAC address after FLOGI.
597 return fnic_send_frame(fnic, fp); 592 */
593 spin_lock_irqsave(&fnic->fnic_lock, flags);
594 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
595 skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
596 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
597 return 0;
598 }
599 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
598 600
599 /* Flogi frame, now enter the state machine */ 601 return fnic_send_frame(fnic, fp);
602}
600 603
601 spin_lock_irqsave(&fnic->fnic_lock, flags); 604/**
602again: 605 * fnic_flush_tx() - send queued frames.
603 /* Get any old cached frames, free them after dropping lock */ 606 * @fnic: fnic device
604 old_flogi = fnic->flogi; 607 *
605 fnic->flogi = NULL; 608 * Send frames that were waiting to go out in FC or Ethernet mode.
606 old_flogi_resp = fnic->flogi_resp; 609 * Whenever changing modes we purge queued frames, so these frames should
607 fnic->flogi_resp = NULL; 610 * be queued for the stable mode that we're in, either FC or Ethernet.
611 *
612 * Called without fnic_lock held.
613 */
614void fnic_flush_tx(struct fnic *fnic)
615{
616 struct sk_buff *skb;
617 struct fc_frame *fp;
608 618
609 fnic->flogi_oxid = FC_XID_UNKNOWN; 619 while ((skb = skb_dequeue(&fnic->frame_queue))) {
620 fp = (struct fc_frame *)skb;
621 fnic_send_frame(fnic, fp);
622 }
623}
610 624
625/**
626 * fnic_set_eth_mode() - put fnic into ethernet mode.
627 * @fnic: fnic device
628 *
629 * Called without fnic lock held.
630 */
631static void fnic_set_eth_mode(struct fnic *fnic)
632{
633 unsigned long flags;
634 enum fnic_state old_state;
635 int ret;
636
637 spin_lock_irqsave(&fnic->fnic_lock, flags);
638again:
611 old_state = fnic->state; 639 old_state = fnic->state;
612 switch (old_state) { 640 switch (old_state) {
613 case FNIC_IN_FC_MODE: 641 case FNIC_IN_FC_MODE:
614 case FNIC_IN_ETH_TRANS_FC_MODE: 642 case FNIC_IN_ETH_TRANS_FC_MODE:
615 default: 643 default:
616 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 644 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
617 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 645 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
619 646
620 if (old_flogi) {
621 dev_kfree_skb(fp_skb(old_flogi));
622 old_flogi = NULL;
623 }
624 if (old_flogi_resp) {
625 dev_kfree_skb(fp_skb(old_flogi_resp));
626 old_flogi_resp = NULL;
627 }
628
629 ret = fnic_fw_reset_handler(fnic); 647 ret = fnic_fw_reset_handler(fnic);
630 648
631 spin_lock_irqsave(&fnic->fnic_lock, flags); 649 spin_lock_irqsave(&fnic->fnic_lock, flags);
632 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) 650 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
633 goto again; 651 goto again;
634 if (ret) { 652 if (ret)
635 fnic->state = old_state; 653 fnic->state = old_state;
636 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
637 dev_kfree_skb(fp_skb(fp));
638 goto fnic_send_end;
639 }
640 old_flogi = fnic->flogi;
641 fnic->flogi = fp;
642 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
643 old_flogi_resp = fnic->flogi_resp;
644 fnic->flogi_resp = NULL;
645 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
646 break; 654 break;
647 655
648 case FNIC_IN_FC_TRANS_ETH_MODE: 656 case FNIC_IN_FC_TRANS_ETH_MODE:
649 /*
650 * A reset is pending with the firmware. Store the flogi
651 * and its oxid. The transition out of this state happens
652 * only when Firmware completes the reset, either with
653 * success or failed. If success, transition to
654 * FNIC_IN_ETH_MODE, if fail, then transition to
655 * FNIC_IN_FC_MODE
656 */
657 fnic->flogi = fp;
658 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
659 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
660 break;
661
662 case FNIC_IN_ETH_MODE: 657 case FNIC_IN_ETH_MODE:
663 /*
664 * The fw/hw is already in eth mode. Store the oxid,
665 * and send the flogi frame out. The transition out of this
666 * state happens only we receive flogi response from the
667 * network, and the oxid matches the cached oxid when the
668 * flogi frame was sent out. If they match, then we issue
669 * a flogi_reg request and transition to state
670 * FNIC_IN_ETH_TRANS_FC_MODE
671 */
672 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
673 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
674 ret = fnic_send_frame(fnic, fp);
675 break; 658 break;
676 } 659 }
677 660 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
678fnic_send_end:
679 if (old_flogi)
680 dev_kfree_skb(fp_skb(old_flogi));
681 if (old_flogi_resp)
682 dev_kfree_skb(fp_skb(old_flogi_resp));
683 return ret;
684} 661}
685 662
686static void fnic_wq_complete_frame_send(struct vnic_wq *wq, 663static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 2b3064828aea..5c1f223cabce 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -48,9 +48,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
48 } 48 }
49 49
50 if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { 50 if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
51 work_done += fnic_wq_copy_cmpl_handler(fnic, 8); 51 work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
52 work_done += fnic_wq_cmpl_handler(fnic, 4); 52 work_done += fnic_wq_cmpl_handler(fnic, -1);
53 work_done += fnic_rq_cmpl_handler(fnic, 4); 53 work_done += fnic_rq_cmpl_handler(fnic, -1);
54 54
55 vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], 55 vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
56 work_done, 56 work_done,
@@ -66,9 +66,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
66 struct fnic *fnic = data; 66 struct fnic *fnic = data;
67 unsigned long work_done = 0; 67 unsigned long work_done = 0;
68 68
69 work_done += fnic_wq_copy_cmpl_handler(fnic, 8); 69 work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
70 work_done += fnic_wq_cmpl_handler(fnic, 4); 70 work_done += fnic_wq_cmpl_handler(fnic, -1);
71 work_done += fnic_rq_cmpl_handler(fnic, 4); 71 work_done += fnic_rq_cmpl_handler(fnic, -1);
72 72
73 vnic_intr_return_credits(&fnic->intr[0], 73 vnic_intr_return_credits(&fnic->intr[0],
74 work_done, 74 work_done,
@@ -83,7 +83,7 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
83 struct fnic *fnic = data; 83 struct fnic *fnic = data;
84 unsigned long rq_work_done = 0; 84 unsigned long rq_work_done = 0;
85 85
86 rq_work_done = fnic_rq_cmpl_handler(fnic, 4); 86 rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
87 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], 87 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
88 rq_work_done, 88 rq_work_done,
89 1 /* unmask intr */, 89 1 /* unmask intr */,
@@ -97,7 +97,7 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
97 struct fnic *fnic = data; 97 struct fnic *fnic = data;
98 unsigned long wq_work_done = 0; 98 unsigned long wq_work_done = 0;
99 99
100 wq_work_done = fnic_wq_cmpl_handler(fnic, 4); 100 wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
101 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], 101 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
102 wq_work_done, 102 wq_work_done,
103 1 /* unmask intr */, 103 1 /* unmask intr */,
@@ -110,7 +110,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
110 struct fnic *fnic = data; 110 struct fnic *fnic = data;
111 unsigned long wq_copy_work_done = 0; 111 unsigned long wq_copy_work_done = 0;
112 112
113 wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8); 113 wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
114 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], 114 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
115 wq_copy_work_done, 115 wq_copy_work_done,
116 1 /* unmask intr */, 116 1 /* unmask intr */,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 71c7bbe26d05..fe1b1031f7ab 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -25,6 +25,8 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/if_ether.h>
29#include <scsi/fc/fc_fip.h>
28#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport.h> 31#include <scsi/scsi_transport.h>
30#include <scsi/scsi_transport_fc.h> 32#include <scsi/scsi_transport_fc.h>
@@ -68,6 +70,7 @@ MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
68 70
69static struct libfc_function_template fnic_transport_template = { 71static struct libfc_function_template fnic_transport_template = {
70 .frame_send = fnic_send, 72 .frame_send = fnic_send,
73 .lport_set_port_id = fnic_set_port_id,
71 .fcp_abort_io = fnic_empty_scsi_cleanup, 74 .fcp_abort_io = fnic_empty_scsi_cleanup,
72 .fcp_cleanup = fnic_empty_scsi_cleanup, 75 .fcp_cleanup = fnic_empty_scsi_cleanup,
73 .exch_mgr_reset = fnic_exch_mgr_reset 76 .exch_mgr_reset = fnic_exch_mgr_reset
@@ -140,6 +143,7 @@ static struct fc_function_template fnic_fc_functions = {
140 .get_fc_host_stats = fnic_get_stats, 143 .get_fc_host_stats = fnic_get_stats,
141 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 144 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
142 .terminate_rport_io = fnic_terminate_rport_io, 145 .terminate_rport_io = fnic_terminate_rport_io,
146 .bsg_request = fc_lport_bsg_request,
143}; 147};
144 148
145static void fnic_get_host_speed(struct Scsi_Host *shost) 149static void fnic_get_host_speed(struct Scsi_Host *shost)
@@ -324,9 +328,6 @@ static int fnic_cleanup(struct fnic *fnic)
324{ 328{
325 unsigned int i; 329 unsigned int i;
326 int err; 330 int err;
327 unsigned long flags;
328 struct fc_frame *flogi = NULL;
329 struct fc_frame *flogi_resp = NULL;
330 331
331 vnic_dev_disable(fnic->vdev); 332 vnic_dev_disable(fnic->vdev);
332 for (i = 0; i < fnic->intr_count; i++) 333 for (i = 0; i < fnic->intr_count; i++)
@@ -367,24 +368,6 @@ static int fnic_cleanup(struct fnic *fnic)
367 for (i = 0; i < fnic->intr_count; i++) 368 for (i = 0; i < fnic->intr_count; i++)
368 vnic_intr_clean(&fnic->intr[i]); 369 vnic_intr_clean(&fnic->intr[i]);
369 370
370 /*
371 * Remove cached flogi and flogi resp frames if any
372 * These frames are not in any queue, and therefore queue
373 * cleanup does not clean them. So clean them explicitly
374 */
375 spin_lock_irqsave(&fnic->fnic_lock, flags);
376 flogi = fnic->flogi;
377 fnic->flogi = NULL;
378 flogi_resp = fnic->flogi_resp;
379 fnic->flogi_resp = NULL;
380 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
381
382 if (flogi)
383 dev_kfree_skb(fp_skb(flogi));
384
385 if (flogi_resp)
386 dev_kfree_skb(fp_skb(flogi_resp));
387
388 mempool_destroy(fnic->io_req_pool); 371 mempool_destroy(fnic->io_req_pool);
389 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) 372 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
390 mempool_destroy(fnic->io_sgl_pool[i]); 373 mempool_destroy(fnic->io_sgl_pool[i]);
@@ -409,6 +392,17 @@ static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
409 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); 392 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
410} 393}
411 394
395/**
396 * fnic_get_mac() - get assigned data MAC address for FIP code.
397 * @lport: local port.
398 */
399static u8 *fnic_get_mac(struct fc_lport *lport)
400{
401 struct fnic *fnic = lport_priv(lport);
402
403 return fnic->data_src_addr;
404}
405
412static int __devinit fnic_probe(struct pci_dev *pdev, 406static int __devinit fnic_probe(struct pci_dev *pdev,
413 const struct pci_device_id *ent) 407 const struct pci_device_id *ent)
414{ 408{
@@ -424,17 +418,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
424 * Allocate SCSI Host and set up association between host, 418 * Allocate SCSI Host and set up association between host,
425 * local port, and fnic 419 * local port, and fnic
426 */ 420 */
427 host = scsi_host_alloc(&fnic_host_template, 421 lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
428 sizeof(struct fc_lport) + sizeof(struct fnic)); 422 if (!lp) {
429 if (!host) { 423 printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
430 printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
431 err = -ENOMEM; 424 err = -ENOMEM;
432 goto err_out; 425 goto err_out;
433 } 426 }
434 lp = shost_priv(host); 427 host = lp->host;
435 lp->host = host;
436 fnic = lport_priv(lp); 428 fnic = lport_priv(lp);
437 fnic->lport = lp; 429 fnic->lport = lp;
430 fnic->ctlr.lp = lp;
438 431
439 snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, 432 snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
440 host->host_no); 433 host->host_no);
@@ -543,12 +536,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
543 goto err_out_dev_close; 536 goto err_out_dev_close;
544 } 537 }
545 538
546 err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr); 539 err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
547 if (err) { 540 if (err) {
548 shost_printk(KERN_ERR, fnic->lport->host, 541 shost_printk(KERN_ERR, fnic->lport->host,
549 "vNIC get MAC addr failed \n"); 542 "vNIC get MAC addr failed \n");
550 goto err_out_dev_close; 543 goto err_out_dev_close;
551 } 544 }
545 /* set data_src for point-to-point mode and to keep it non-zero */
546 memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
552 547
553 /* Get vNIC configuration */ 548 /* Get vNIC configuration */
554 err = fnic_get_vnic_config(fnic); 549 err = fnic_get_vnic_config(fnic);
@@ -560,6 +555,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
560 } 555 }
561 host->max_lun = fnic->config.luns_per_tgt; 556 host->max_lun = fnic->config.luns_per_tgt;
562 host->max_id = FNIC_MAX_FCP_TARGET; 557 host->max_id = FNIC_MAX_FCP_TARGET;
558 host->max_cmd_len = FNIC_MAX_CMD_LEN;
563 559
564 fnic_get_res_counts(fnic); 560 fnic_get_res_counts(fnic);
565 561
@@ -571,19 +567,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
571 goto err_out_dev_close; 567 goto err_out_dev_close;
572 } 568 }
573 569
574 err = fnic_request_intr(fnic);
575 if (err) {
576 shost_printk(KERN_ERR, fnic->lport->host,
577 "Unable to request irq.\n");
578 goto err_out_clear_intr;
579 }
580
581 err = fnic_alloc_vnic_resources(fnic); 570 err = fnic_alloc_vnic_resources(fnic);
582 if (err) { 571 if (err) {
583 shost_printk(KERN_ERR, fnic->lport->host, 572 shost_printk(KERN_ERR, fnic->lport->host,
584 "Failed to alloc vNIC resources, " 573 "Failed to alloc vNIC resources, "
585 "aborting.\n"); 574 "aborting.\n");
586 goto err_out_free_intr; 575 goto err_out_clear_intr;
587 } 576 }
588 577
589 578
@@ -623,9 +612,21 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
623 fnic->vlan_hw_insert = 1; 612 fnic->vlan_hw_insert = 1;
624 fnic->vlan_id = 0; 613 fnic->vlan_id = 0;
625 614
626 fnic->flogi_oxid = FC_XID_UNKNOWN; 615 /* Initialize the FIP fcoe_ctrl struct */
627 fnic->flogi = NULL; 616 fnic->ctlr.send = fnic_eth_send;
628 fnic->flogi_resp = NULL; 617 fnic->ctlr.update_mac = fnic_update_mac;
618 fnic->ctlr.get_src_addr = fnic_get_mac;
619 fcoe_ctlr_init(&fnic->ctlr);
620 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
621 shost_printk(KERN_INFO, fnic->lport->host,
622 "firmware supports FIP\n");
623 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
624 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
625 } else {
626 shost_printk(KERN_INFO, fnic->lport->host,
627 "firmware uses non-FIP mode\n");
628 fnic->ctlr.mode = FIP_ST_NON_FIP;
629 }
629 fnic->state = FNIC_IN_FC_MODE; 630 fnic->state = FNIC_IN_FC_MODE;
630 631
631 /* Enable hardware stripping of vlan header on ingress */ 632 /* Enable hardware stripping of vlan header on ingress */
@@ -716,6 +717,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
716 INIT_WORK(&fnic->link_work, fnic_handle_link); 717 INIT_WORK(&fnic->link_work, fnic_handle_link);
717 INIT_WORK(&fnic->frame_work, fnic_handle_frame); 718 INIT_WORK(&fnic->frame_work, fnic_handle_frame);
718 skb_queue_head_init(&fnic->frame_queue); 719 skb_queue_head_init(&fnic->frame_queue);
720 skb_queue_head_init(&fnic->tx_queue);
719 721
720 /* Enable all queues */ 722 /* Enable all queues */
721 for (i = 0; i < fnic->raw_wq_count; i++) 723 for (i = 0; i < fnic->raw_wq_count; i++)
@@ -728,6 +730,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
728 fc_fabric_login(lp); 730 fc_fabric_login(lp);
729 731
730 vnic_dev_enable(fnic->vdev); 732 vnic_dev_enable(fnic->vdev);
733
734 err = fnic_request_intr(fnic);
735 if (err) {
736 shost_printk(KERN_ERR, fnic->lport->host,
737 "Unable to request irq.\n");
738 goto err_out_free_exch_mgr;
739 }
740
731 for (i = 0; i < fnic->intr_count; i++) 741 for (i = 0; i < fnic->intr_count; i++)
732 vnic_intr_unmask(&fnic->intr[i]); 742 vnic_intr_unmask(&fnic->intr[i]);
733 743
@@ -738,8 +748,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
738err_out_free_exch_mgr: 748err_out_free_exch_mgr:
739 fc_exch_mgr_free(lp); 749 fc_exch_mgr_free(lp);
740err_out_remove_scsi_host: 750err_out_remove_scsi_host:
741 fc_remove_host(fnic->lport->host); 751 fc_remove_host(lp->host);
742 scsi_remove_host(fnic->lport->host); 752 scsi_remove_host(lp->host);
743err_out_free_rq_buf: 753err_out_free_rq_buf:
744 for (i = 0; i < fnic->rq_count; i++) 754 for (i = 0; i < fnic->rq_count; i++)
745 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); 755 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
@@ -752,8 +762,6 @@ err_out_free_ioreq_pool:
752 mempool_destroy(fnic->io_req_pool); 762 mempool_destroy(fnic->io_req_pool);
753err_out_free_resources: 763err_out_free_resources:
754 fnic_free_vnic_resources(fnic); 764 fnic_free_vnic_resources(fnic);
755err_out_free_intr:
756 fnic_free_intr(fnic);
757err_out_clear_intr: 765err_out_clear_intr:
758 fnic_clear_intr_mode(fnic); 766 fnic_clear_intr_mode(fnic);
759err_out_dev_close: 767err_out_dev_close:
@@ -775,6 +783,7 @@ err_out:
775static void __devexit fnic_remove(struct pci_dev *pdev) 783static void __devexit fnic_remove(struct pci_dev *pdev)
776{ 784{
777 struct fnic *fnic = pci_get_drvdata(pdev); 785 struct fnic *fnic = pci_get_drvdata(pdev);
786 struct fc_lport *lp = fnic->lport;
778 unsigned long flags; 787 unsigned long flags;
779 788
780 /* 789 /*
@@ -796,6 +805,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
796 */ 805 */
797 flush_workqueue(fnic_event_queue); 806 flush_workqueue(fnic_event_queue);
798 skb_queue_purge(&fnic->frame_queue); 807 skb_queue_purge(&fnic->frame_queue);
808 skb_queue_purge(&fnic->tx_queue);
799 809
800 /* 810 /*
801 * Log off the fabric. This stops all remote ports, dns port, 811 * Log off the fabric. This stops all remote ports, dns port,
@@ -808,7 +818,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
808 fnic->in_remove = 1; 818 fnic->in_remove = 1;
809 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 819 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
810 820
811 fc_lport_destroy(fnic->lport); 821 fcoe_ctlr_destroy(&fnic->ctlr);
822 fc_lport_destroy(lp);
812 823
813 /* 824 /*
814 * This stops the fnic device, masks all interrupts. Completed 825 * This stops the fnic device, masks all interrupts. Completed
@@ -818,6 +829,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
818 fnic_cleanup(fnic); 829 fnic_cleanup(fnic);
819 830
820 BUG_ON(!skb_queue_empty(&fnic->frame_queue)); 831 BUG_ON(!skb_queue_empty(&fnic->frame_queue));
832 BUG_ON(!skb_queue_empty(&fnic->tx_queue));
821 833
822 spin_lock_irqsave(&fnic_list_lock, flags); 834 spin_lock_irqsave(&fnic_list_lock, flags);
823 list_del(&fnic->list); 835 list_del(&fnic->list);
@@ -827,8 +839,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
827 scsi_remove_host(fnic->lport->host); 839 scsi_remove_host(fnic->lport->host);
828 fc_exch_mgr_free(fnic->lport); 840 fc_exch_mgr_free(fnic->lport);
829 vnic_dev_notify_unset(fnic->vdev); 841 vnic_dev_notify_unset(fnic->vdev);
830 fnic_free_vnic_resources(fnic);
831 fnic_free_intr(fnic); 842 fnic_free_intr(fnic);
843 fnic_free_vnic_resources(fnic);
832 fnic_clear_intr_mode(fnic); 844 fnic_clear_intr_mode(fnic);
833 vnic_dev_close(fnic->vdev); 845 vnic_dev_close(fnic->vdev);
834 vnic_dev_unregister(fnic->vdev); 846 vnic_dev_unregister(fnic->vdev);
@@ -836,7 +848,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
836 pci_release_regions(pdev); 848 pci_release_regions(pdev);
837 pci_disable_device(pdev); 849 pci_disable_device(pdev);
838 pci_set_drvdata(pdev, NULL); 850 pci_set_drvdata(pdev, NULL);
839 scsi_host_put(fnic->lport->host); 851 scsi_host_put(lp->host);
840} 852}
841 853
842static struct pci_driver fnic_driver = { 854static struct pci_driver fnic_driver = {
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 7ba61ec715d2..50488f8e169d 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -144,10 +144,9 @@ int fnic_get_vnic_config(struct fnic *fnic)
144 c->intr_timer_type = c->intr_timer_type; 144 c->intr_timer_type = c->intr_timer_type;
145 145
146 shost_printk(KERN_INFO, fnic->lport->host, 146 shost_printk(KERN_INFO, fnic->lport->host,
147 "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " 147 "vNIC MAC addr %pM "
148 "wq/wq_copy/rq %d/%d/%d\n", 148 "wq/wq_copy/rq %d/%d/%d\n",
149 fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2], 149 fnic->ctlr.ctl_src_addr,
150 fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
151 c->wq_enet_desc_count, c->wq_copy_desc_count, 150 c->wq_enet_desc_count, c->wq_copy_desc_count,
152 c->rq_desc_count); 151 c->rq_desc_count);
153 shost_printk(KERN_INFO, fnic->lport->host, 152 shost_printk(KERN_INFO, fnic->lport->host,
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
index b6f310262534..ef8aaf2156dd 100644
--- a/drivers/scsi/fnic/fnic_res.h
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -51,6 +51,31 @@ static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); 51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
52} 52}
53 53
54static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq,
55 void *os_buf, dma_addr_t dma_addr,
56 unsigned int len,
57 int vlan_tag_insert,
58 unsigned int vlan_tag,
59 int cq_entry)
60{
61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
62
63 wq_enet_desc_enc(desc,
64 (u64)dma_addr | VNIC_PADDR_TARGET,
65 (u16)len,
66 0, /* mss_or_csum_offset */
67 0, /* fc_eof */
68 0, /* offload_mode */
69 1, /* eop */
70 (u8)cq_entry,
71 0, /* fcoe_encap */
72 (u8)vlan_tag_insert,
73 (u16)vlan_tag,
74 0 /* loopback */);
75
76 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
77}
78
54static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, 79static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
55 u32 req_id, 80 u32 req_id,
56 u32 lunmap_id, u8 spl_flags, 81 u32 lunmap_id, u8 spl_flags,
@@ -58,6 +83,7 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
58 u64 sgl_addr, u64 sns_addr, 83 u64 sgl_addr, u64 sns_addr,
59 u8 crn, u8 pri_ta, 84 u8 crn, u8 pri_ta,
60 u8 flags, u8 *scsi_cdb, 85 u8 flags, u8 *scsi_cdb,
86 u8 cdb_len,
61 u32 data_len, u8 *lun, 87 u32 data_len, u8 *lun,
62 u32 d_id, u16 mss, 88 u32 d_id, u16 mss,
63 u32 ratov, u32 edtov) 89 u32 ratov, u32 edtov)
@@ -82,7 +108,8 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
82 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ 108 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
83 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ 109 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
84 desc->u.icmnd_16.flags = flags; /* command flags */ 110 desc->u.icmnd_16.flags = flags; /* command flags */
85 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */ 111 memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16);
112 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */
86 desc->u.icmnd_16.data_len = data_len; /* length of data expected */ 113 desc->u.icmnd_16.data_len = data_len; /* length of data expected */
87 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ 114 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
88 desc->u.icmnd_16._resvd2 = 0; /* reserved */ 115 desc->u.icmnd_16._resvd2 = 0; /* reserved */
@@ -132,12 +159,37 @@ static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
132 desc->hdr.tag.u.req_id = req_id; /* id for this request */ 159 desc->hdr.tag.u.req_id = req_id; /* id for this request */
133 160
134 desc->u.flogi_reg.format = format; 161 desc->u.flogi_reg.format = format;
162 desc->u.flogi_reg._resvd = 0;
135 hton24(desc->u.flogi_reg.s_id, s_id); 163 hton24(desc->u.flogi_reg.s_id, s_id);
136 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); 164 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
137 165
138 vnic_wq_copy_post(wq); 166 vnic_wq_copy_post(wq);
139} 167}
140 168
169static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq,
170 u32 req_id, u32 s_id,
171 u8 *fcf_mac, u8 *ha_mac,
172 u32 r_a_tov, u32 e_d_tov)
173{
174 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
175
176 desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */
177 desc->hdr.status = 0; /* header status entry */
178 desc->hdr._resvd = 0; /* reserved */
179 desc->hdr.tag.u.req_id = req_id; /* id for this request */
180
181 desc->u.flogi_fip_reg._resvd0 = 0;
182 hton24(desc->u.flogi_fip_reg.s_id, s_id);
183 memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN);
184 desc->u.flogi_fip_reg._resvd1 = 0;
185 desc->u.flogi_fip_reg.r_a_tov = r_a_tov;
186 desc->u.flogi_fip_reg.e_d_tov = e_d_tov;
187 memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN);
188 desc->u.flogi_fip_reg._resvd2 = 0;
189
190 vnic_wq_copy_post(wq);
191}
192
141static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, 193static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
142 u32 req_id) 194 u32 req_id)
143{ 195{
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index bfc996971b81..65a39b0f6dc2 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -174,6 +174,9 @@ int fnic_fw_reset_handler(struct fnic *fnic)
174 int ret = 0; 174 int ret = 0;
175 unsigned long flags; 175 unsigned long flags;
176 176
177 skb_queue_purge(&fnic->frame_queue);
178 skb_queue_purge(&fnic->tx_queue);
179
177 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); 180 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
178 181
179 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) 182 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -200,9 +203,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
200 * fnic_flogi_reg_handler 203 * fnic_flogi_reg_handler
201 * Routine to send flogi register msg to fw 204 * Routine to send flogi register msg to fw
202 */ 205 */
203int fnic_flogi_reg_handler(struct fnic *fnic) 206int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
204{ 207{
205 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; 208 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
209 enum fcpio_flogi_reg_format_type format;
210 struct fc_lport *lp = fnic->lport;
206 u8 gw_mac[ETH_ALEN]; 211 u8 gw_mac[ETH_ALEN];
207 int ret = 0; 212 int ret = 0;
208 unsigned long flags; 213 unsigned long flags;
@@ -217,23 +222,32 @@ int fnic_flogi_reg_handler(struct fnic *fnic)
217 goto flogi_reg_ioreq_end; 222 goto flogi_reg_ioreq_end;
218 } 223 }
219 224
220 if (fnic->fcoui_mode) 225 if (fnic->ctlr.map_dest) {
221 memset(gw_mac, 0xff, ETH_ALEN); 226 memset(gw_mac, 0xff, ETH_ALEN);
222 else 227 format = FCPIO_FLOGI_REG_DEF_DEST;
223 memcpy(gw_mac, fnic->dest_addr, ETH_ALEN); 228 } else {
229 memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
230 format = FCPIO_FLOGI_REG_GW_DEST;
231 }
224 232
225 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, 233 if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
226 FCPIO_FLOGI_REG_GW_DEST, 234 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
227 fnic->s_id, 235 fc_id, gw_mac,
228 gw_mac); 236 fnic->data_src_addr,
237 lp->r_a_tov, lp->e_d_tov);
238 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
239 "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
240 fc_id, fnic->data_src_addr, gw_mac);
241 } else {
242 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
243 format, fc_id, gw_mac);
244 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
245 "FLOGI reg issued fcid %x map %d dest %pM\n",
246 fc_id, fnic->ctlr.map_dest, gw_mac);
247 }
229 248
230flogi_reg_ioreq_end: 249flogi_reg_ioreq_end:
231 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); 250 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
232
233 if (!ret)
234 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
235 "flog reg issued\n");
236
237 return ret; 251 return ret;
238} 252}
239 253
@@ -319,7 +333,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
319 0, /* scsi cmd ref, always 0 */ 333 0, /* scsi cmd ref, always 0 */
320 pri_tag, /* scsi pri and tag */ 334 pri_tag, /* scsi pri and tag */
321 flags, /* command flags */ 335 flags, /* command flags */
322 sc->cmnd, scsi_bufflen(sc), 336 sc->cmnd, sc->cmd_len,
337 scsi_bufflen(sc),
323 fc_lun.scsi_lun, io_req->port_id, 338 fc_lun.scsi_lun, io_req->port_id,
324 rport->maxframe_size, rp->r_a_tov, 339 rport->maxframe_size, rp->r_a_tov,
325 rp->e_d_tov); 340 rp->e_d_tov);
@@ -452,7 +467,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	u8 hdr_status;
 	struct fcpio_tag tag;
 	int ret = 0;
-	struct fc_frame *flogi;
 	unsigned long flags;
 
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
@@ -462,9 +476,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
-	flogi = fnic->flogi;
-	fnic->flogi = NULL;
-
 	/* fnic should be in FC_TRANS_ETH_MODE */
 	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
 		/* Check status of reset completion */
@@ -505,17 +516,14 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	 * free the flogi frame. Else, send it out
 	 */
 	if (fnic->remove_wait || ret) {
-		fnic->flogi_oxid = FC_XID_UNKNOWN;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (flogi)
-			dev_kfree_skb_irq(fp_skb(flogi));
+		skb_queue_purge(&fnic->tx_queue);
 		goto reset_cmpl_handler_end;
 	}
 
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	if (flogi)
-		ret = fnic_send_frame(fnic, flogi);
+	fnic_flush_tx(fnic);
 
  reset_cmpl_handler_end:
 	return ret;
@@ -532,18 +540,13 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 	u8 hdr_status;
 	struct fcpio_tag tag;
 	int ret = 0;
-	struct fc_frame *flogi_resp = NULL;
 	unsigned long flags;
-	struct sk_buff *skb;
 
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 
 	/* Update fnic state based on status of flogi reg completion */
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
-	flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
-
 	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
 
 		/* Check flogi registration completion status */
@@ -567,25 +570,17 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 		ret = -1;
 	}
 
-	/* Successful flogi reg cmpl, pass frame to LibFC */
-	if (!ret && flogi_resp) {
+	if (!ret) {
 		if (fnic->stop_rx_link_events) {
 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 			goto reg_cmpl_handler_end;
 		}
-		skb = (struct sk_buff *)flogi_resp;
-		/* Use fr_flags to indicate whether flogi resp or not */
-		fr_flags(flogi_resp) = 1;
-		fr_dev(flogi_resp) = fnic->lport;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-		skb_queue_tail(&fnic->frame_queue, skb);
+		fnic_flush_tx(fnic);
 		queue_work(fnic_event_queue, &fnic->frame_work);
-
 	} else {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (flogi_resp)
-			dev_kfree_skb_irq(fp_skb(flogi_resp));
 	}
 
 reg_cmpl_handler_end:
@@ -907,6 +902,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 		break;
 
 	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
 		ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
 		break;
 
@@ -1224,22 +1220,6 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 
 }
 
-static void fnic_block_error_handler(struct scsi_cmnd *sc)
-{
-	struct Scsi_Host *shost = sc->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
-	unsigned long flags;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		msleep(1000);
-		spin_lock_irqsave(shost->host_lock, flags);
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-}
-
 /*
  * This function is exported to SCSI for sending abort cmnds.
  * A SCSI IO is represented by a io_req in the driver.
@@ -1259,7 +1239,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 
 	/* Wait for rport to unblock */
-	fnic_block_error_handler(sc);
+	fc_block_scsi_eh(sc);
 
 	/* Get local-port, check ready and link up */
 	lp = shost_priv(sc->device->host);
@@ -1541,7 +1521,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 
 	/* Wait for rport to unblock */
-	fnic_block_error_handler(sc);
+	fc_block_scsi_eh(sc);
 
 	/* Get local-port, check ready and link up */
 	lp = shost_priv(sc->device->host);
@@ -1762,7 +1742,7 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
 	fnic->remove_wait = &remove_wait;
 	old_state = fnic->state;
 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	err = fnic_fw_reset_handler(fnic);
@@ -1802,7 +1782,7 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	old_state = fnic->state;
 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	if (fnic_fw_reset_handler(fnic)) {
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index 46baa5254001..fbb55364e272 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -95,5 +95,6 @@ struct vnic_fc_config {
 
 #define VFCF_FCP_SEQ_LVL_ERR	0x1	/* Enable FCP-2 Error Recovery */
 #define VFCF_PERBI		0x2	/* persistent binding info available */
+#define VFCF_FIP_CAPABLE	0x4	/* firmware can handle FIP */
 
 #endif /* _VNIC_SCSI_H_ */