Diffstat (limited to 'drivers/scsi/fnic/fnic_fcs.c')

 -rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 499
 1 file changed, 238 insertions(+), 261 deletions(-)
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 50db3e36a619..54f8d0e5407f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -23,6 +23,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/workqueue.h>
+#include <scsi/fc/fc_fip.h>
 #include <scsi/fc/fc_els.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/fc_frame.h>
@@ -34,6 +35,8 @@

 struct workqueue_struct *fnic_event_queue;

+static void fnic_set_eth_mode(struct fnic *);
+
 void fnic_handle_link(struct work_struct *work)
 {
 	struct fnic *fnic = container_of(work, struct fnic, link_work);
@@ -64,10 +67,10 @@ void fnic_handle_link(struct work_struct *work)
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
-				fc_linkdown(fnic->lport);
+				fcoe_ctlr_link_down(&fnic->ctlr);
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
-				fc_linkup(fnic->lport);
+				fcoe_ctlr_link_up(&fnic->ctlr);
 			} else
 				/* UP -> UP */
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -76,13 +79,13 @@ void fnic_handle_link(struct work_struct *work)
 		/* DOWN -> UP */
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
-		fc_linkup(fnic->lport);
+		fcoe_ctlr_link_up(&fnic->ctlr);
 	} else {
 		/* UP -> DOWN */
 		fnic->lport->host_stats.link_failure_count++;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
-		fc_linkdown(fnic->lport);
+		fcoe_ctlr_link_down(&fnic->ctlr);
 	}

 }
@@ -107,197 +110,179 @@ void fnic_handle_frame(struct work_struct *work)
 			return;
 		}
 		fp = (struct fc_frame *)skb;
-		/* if Flogi resp frame, register the address */
-		if (fr_flags(fp)) {
-			vnic_dev_add_addr(fnic->vdev,
-					  fnic->data_src_addr);
-			fr_flags(fp) = 0;
+
+		/*
+		 * If we're in a transitional state, just re-queue and return.
+		 * The queue will be serviced when we get to a stable state.
+		 */
+		if (fnic->state != FNIC_IN_FC_MODE &&
+		    fnic->state != FNIC_IN_ETH_MODE) {
+			skb_queue_head(&fnic->frame_queue, skb);
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			return;
 		}
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

 		fc_exch_recv(lp, fp);
 	}
-
-}
-
-static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
-					   u32 len, u8 sof, u8 eof)
-{
-	struct fc_frame *fp = (struct fc_frame *)skb;
-
-	skb_trim(skb, len);
-	fr_eof(fp) = eof;
-	fr_sof(fp) = sof;
 }

-
-static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
+/**
+ * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
+ * @fnic: fnic instance.
+ * @skb: Ethernet Frame.
+ */
+static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
 {
 	struct fc_frame *fp;
 	struct ethhdr *eh;
-	struct vlan_ethhdr *vh;
 	struct fcoe_hdr *fcoe_hdr;
 	struct fcoe_crc_eof *ft;
-	u32 transport_len = 0;

+	/*
+	 * Undo VLAN encapsulation if present.
+	 */
 	eh = (struct ethhdr *)skb->data;
-	vh = (struct vlan_ethhdr *)skb->data;
-	if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
-	    vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
-		skb_pull(skb, sizeof(struct vlan_ethhdr));
-		transport_len += sizeof(struct vlan_ethhdr);
-	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
-		transport_len += sizeof(struct ethhdr);
-		skb_pull(skb, sizeof(struct ethhdr));
-	} else
-		return -1;
+	if (eh->h_proto == htons(ETH_P_8021Q)) {
+		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
+		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+		skb_reset_mac_header(skb);
+	}
+	if (eh->h_proto == htons(ETH_P_FIP)) {
+		skb_pull(skb, sizeof(*eh));
+		fcoe_ctlr_recv(&fnic->ctlr, skb);
+		return 1;		/* let caller know packet was used */
+	}
+	if (eh->h_proto != htons(ETH_P_FCOE))
+		goto drop;
+	skb_set_network_header(skb, sizeof(*eh));
+	skb_pull(skb, sizeof(*eh));

 	fcoe_hdr = (struct fcoe_hdr *)skb->data;
 	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
-		return -1;
+		goto drop;

 	fp = (struct fc_frame *)skb;
 	fc_frame_init(fp);
 	fr_sof(fp) = fcoe_hdr->fcoe_sof;
 	skb_pull(skb, sizeof(struct fcoe_hdr));
-	transport_len += sizeof(struct fcoe_hdr);
+	skb_reset_transport_header(skb);

-	ft = (struct fcoe_crc_eof *)(skb->data + len -
-				     transport_len - sizeof(*ft));
+	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
 	fr_eof(fp) = ft->fcoe_eof;
-	skb_trim(skb, len - transport_len - sizeof(*ft));
+	skb_trim(skb, skb->len - sizeof(*ft));
 	return 0;
+drop:
+	dev_kfree_skb_irq(skb);
+	return -1;
 }

-static inline int fnic_handle_flogi_resp(struct fnic *fnic,
-					 struct fc_frame *fp)
+/**
+ * fnic_update_mac_locked() - set data MAC address and filters.
+ * @fnic: fnic instance.
+ * @new: newly-assigned FCoE MAC address.
+ *
+ * Called with the fnic lock held.
+ */
+void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
 {
-	u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
-	struct ethhdr *eth_hdr;
-	struct fc_frame_header *fh;
-	int ret = 0;
-	unsigned long flags;
-	struct fc_frame *old_flogi_resp = NULL;
+	u8 *ctl = fnic->ctlr.ctl_src_addr;
+	u8 *data = fnic->data_src_addr;

-	fh = (struct fc_frame_header *)fr_hdr(fp);
+	if (is_zero_ether_addr(new))
+		new = ctl;
+	if (!compare_ether_addr(data, new))
+		return;
+	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
+	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
+		vnic_dev_del_addr(fnic->vdev, data);
+	memcpy(data, new, ETH_ALEN);
+	if (compare_ether_addr(new, ctl))
+		vnic_dev_add_addr(fnic->vdev, new);
+}

-	spin_lock_irqsave(&fnic->fnic_lock, flags);
+/**
+ * fnic_update_mac() - set data MAC address and filters.
+ * @lport: local port.
+ * @new: newly-assigned FCoE MAC address.
+ */
+void fnic_update_mac(struct fc_lport *lport, u8 *new)
+{
+	struct fnic *fnic = lport_priv(lport);

-	if (fnic->state == FNIC_IN_ETH_MODE) {
+	spin_lock_irq(&fnic->fnic_lock);
+	fnic_update_mac_locked(fnic, new);
+	spin_unlock_irq(&fnic->fnic_lock);
+}

-	/*
-	 * Check if oxid matches on taking the lock. A new Flogi
-	 * issued by libFC might have changed the fnic cached oxid
-	 */
-	if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
-		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
-			     "Flogi response oxid not"
-			     " matching cached oxid, dropping frame"
-			     "\n");
-		ret = -1;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		dev_kfree_skb_irq(fp_skb(fp));
-		goto handle_flogi_resp_end;
-	}
+/**
+ * fnic_set_port_id() - set the port_ID after successful FLOGI.
+ * @lport: local port.
+ * @port_id: assigned FC_ID.
+ * @fp: received frame containing the FLOGI accept or NULL.
+ *
+ * This is called from libfc when a new FC_ID has been assigned.
+ * This causes us to reset the firmware to FC_MODE and setup the new MAC
+ * address and FC_ID.
+ *
+ * It is also called with FC_ID 0 when we're logged off.
+ *
+ * If the FC_ID is due to point-to-point, fp may be NULL.
+ */
+void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
+{
+	struct fnic *fnic = lport_priv(lport);
+	u8 *mac;
+	int ret;

-	/* Drop older cached flogi response frame, cache this frame */
-	old_flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = fp;
-	fnic->flogi_oxid = FC_XID_UNKNOWN;
+	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
+		     port_id, fp);

 	/*
-	 * this frame is part of flogi get the src mac addr from this
-	 * frame if the src mac is fcoui based then we mark the
-	 * address mode flag to use fcoui base for dst mac addr
-	 * otherwise we have to store the fcoe gateway addr
-	 */
-	eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
-	memcpy(mac, eth_hdr->h_source, ETH_ALEN);
+	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
+	 * Set ethernet mode to send FLOGI.
+	 */
+	if (!port_id) {
+		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
+		fnic_set_eth_mode(fnic);
+		return;
+	}

-	if (ntoh24(mac) == FC_FCOE_OUI)
-		fnic->fcoui_mode = 1;
-	else {
-		fnic->fcoui_mode = 0;
-		memcpy(fnic->dest_addr, mac, ETH_ALEN);
-	}
+	if (fp) {
+		mac = fr_cb(fp)->granted_mac;
+		if (is_zero_ether_addr(mac)) {
+			/* non-FIP - FLOGI already accepted - ignore return */
+			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
+		}
+		fnic_update_mac(lport, mac);
+	}

-	/*
-	 * Except for Flogi frame, all outbound frames from us have the
-	 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses
-	 * the vnic MAC address as the Eth Src address
-	 */
-	fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
-
-	/* We get our s_id from the d_id of the flogi resp frame */
-	fnic->s_id = ntoh24(fh->fh_d_id);
-
-	/* Change state to reflect transition from Eth to FC mode */
+	/* Change state to reflect transition to FC mode */
+	spin_lock_irq(&fnic->fnic_lock);
+	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
 		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
-
-	} else {
+	else {
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
-		ret = -1;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		dev_kfree_skb_irq(fp_skb(fp));
-		goto handle_flogi_resp_end;
+		spin_unlock_irq(&fnic->fnic_lock);
+		return;
 	}
-
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-	/* Drop older cached frame */
-	if (old_flogi_resp)
-		dev_kfree_skb_irq(fp_skb(old_flogi_resp));
+	spin_unlock_irq(&fnic->fnic_lock);

 	/*
-	 * send flogi reg request to firmware, this will put the fnic in
-	 * in FC mode
+	 * Send FLOGI registration to firmware to set up FC mode.
+	 * The new address will be set up when registration completes.
 	 */
-	ret = fnic_flogi_reg_handler(fnic);
+	ret = fnic_flogi_reg_handler(fnic, port_id);

 	if (ret < 0) {
-		int free_fp = 1;
-		spin_lock_irqsave(&fnic->fnic_lock, flags);
-		/*
-		 * free the frame is some other thread is not
-		 * pointing to it
-		 */
-		if (fnic->flogi_resp != fp)
-			free_fp = 0;
-		else
-			fnic->flogi_resp = NULL;
-
+		spin_lock_irq(&fnic->fnic_lock);
 		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
 			fnic->state = FNIC_IN_ETH_MODE;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (free_fp)
-			dev_kfree_skb_irq(fp_skb(fp));
+		spin_unlock_irq(&fnic->fnic_lock);
 	}
-
-handle_flogi_resp_end:
-	return ret;
-}
-
-/* Returns 1 for a response that matches cached flogi oxid */
-static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
-					       struct fc_frame *fp)
-{
-	struct fc_frame_header *fh;
-	int ret = 0;
-	u32 f_ctl;
-
-	fh = fc_frame_header_get(fp);
-	f_ctl = ntoh24(fh->fh_f_ctl);
-
-	if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
-	    fh->fh_r_ctl == FC_RCTL_ELS_REP &&
-	    (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
-	    fh->fh_type == FC_TYPE_ELS)
-		ret = 1;
-
-	return ret;
 }

 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
@@ -326,6 +311,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
 	skb = buf->os_buf;
+	fp = (struct fc_frame *)skb;
 	buf->os_buf = NULL;

 	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
@@ -338,6 +324,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
 		eth_hdrs_stripped = 1;
+		skb_trim(skb, fcp_bytes_written);
+		fr_sof(fp) = sof;
+		fr_eof(fp) = eof;

 	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
 		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
@@ -352,6 +341,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
 		eth_hdrs_stripped = 0;
+		skb_trim(skb, bytes_written);
+		if (!fcs_ok) {
+			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+				     "fcs error. dropping packet.\n");
+			goto drop;
+		}
+		if (fnic_import_rq_eth_pkt(fnic, skb))
+			return;

 	} else {
 		/* wrong CQ type*/
@@ -370,43 +367,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 		goto drop;
 	}

-	if (eth_hdrs_stripped)
-		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
-	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
-		goto drop;
-
-	fp = (struct fc_frame *)skb;
-
-	/*
-	 * If frame is an ELS response that matches the cached FLOGI OX_ID,
-	 * and is accept, issue flogi_reg_request copy wq request to firmware
-	 * to register the S_ID and determine whether FC_OUI mode or GW mode.
-	 */
-	if (is_matching_flogi_resp_frame(fnic, fp)) {
-		if (!eth_hdrs_stripped) {
-			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
-				fnic_handle_flogi_resp(fnic, fp);
-				return;
-			}
-			/*
-			 * Recd. Flogi reject. No point registering
-			 * with fw, but forward to libFC
-			 */
-			goto forward;
-		}
-		goto drop;
-	}
-	if (!eth_hdrs_stripped)
-		goto drop;
-
-forward:
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	if (fnic->stop_rx_link_events) {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		goto drop;
 	}
-	/* Use fr_flags to indicate whether succ. flogi resp or not */
-	fr_flags(fp) = 0;
 	fr_dev(fp) = fnic->lport;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

@@ -494,12 +459,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	buf->os_buf = NULL;
 }

-static inline int is_flogi_frame(struct fc_frame_header *fh)
+/**
+ * fnic_eth_send() - Send Ethernet frame.
+ * @fip: fcoe_ctlr instance.
+ * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
+ */
+void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
+	struct fnic *fnic = fnic_from_ctlr(fip);
+	struct vnic_wq *wq = &fnic->wq[0];
+	dma_addr_t pa;
+	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *vlan_hdr;
+	unsigned long flags;
+
+	if (!fnic->vlan_hw_insert) {
+		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
+				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
+		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
+		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
+		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+	}
+
+	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+	spin_lock_irqsave(&fnic->wq_lock[0], flags);
+	if (!vnic_wq_desc_avail(wq)) {
+		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+		kfree_skb(skb);
+		return;
+	}
+
+	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
+			       fnic->vlan_hw_insert, fnic->vlan_id, 1);
+	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 }

-int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+/*
+ * Send FC frame.
+ */
+static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 {
 	struct vnic_wq *wq = &fnic->wq[0];
 	struct sk_buff *skb;
@@ -515,6 +517,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 	fh = fc_frame_header_get(fp);
 	skb = fp_skb(fp);

+	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
+		return 0;
+
 	if (!fnic->vlan_hw_insert) {
 		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
 		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
@@ -530,16 +536,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
 	}

-	if (is_flogi_frame(fh)) {
+	if (fnic->ctlr.map_dest)
 		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
-		memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
-	} else {
-		if (fnic->fcoui_mode)
-			fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
-		else
-			memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
-		memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
-	}
+	else
+		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
+	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

 	tot_len = skb->len;
 	BUG_ON(tot_len % 4);
@@ -578,109 +579,85 @@ fnic_send_frame_end:
 int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
 {
 	struct fnic *fnic = lport_priv(lp);
-	struct fc_frame_header *fh;
-	int ret = 0;
-	enum fnic_state old_state;
 	unsigned long flags;
-	struct fc_frame *old_flogi = NULL;
-	struct fc_frame *old_flogi_resp = NULL;

 	if (fnic->in_remove) {
 		dev_kfree_skb(fp_skb(fp));
-		ret = -1;
-		goto fnic_send_end;
+		return -1;
 	}

-	fh = fc_frame_header_get(fp);
-	/* if not an Flogi frame, send it out, this is the common case */
-	if (!is_flogi_frame(fh))
-		return fnic_send_frame(fnic, fp);
+	/*
+	 * Queue frame if in a transitional state.
+	 * This occurs while registering the Port_ID / MAC address after FLOGI.
+	 */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
+		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

-	/* Flogi frame, now enter the state machine */
+	return fnic_send_frame(fnic, fp);
+}

-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-again:
-	/* Get any old cached frames, free them after dropping lock */
-	old_flogi = fnic->flogi;
-	fnic->flogi = NULL;
-	old_flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
+/**
+ * fnic_flush_tx() - send queued frames.
+ * @fnic: fnic device
+ *
+ * Send frames that were waiting to go out in FC or Ethernet mode.
+ * Whenever changing modes we purge queued frames, so these frames should
+ * be queued for the stable mode that we're in, either FC or Ethernet.
+ *
+ * Called without fnic_lock held.
+ */
+void fnic_flush_tx(struct fnic *fnic)
+{
+	struct sk_buff *skb;
+	struct fc_frame *fp;

-	fnic->flogi_oxid = FC_XID_UNKNOWN;
+	while ((skb = skb_dequeue(&fnic->frame_queue))) {
+		fp = (struct fc_frame *)skb;
+		fnic_send_frame(fnic, fp);
+	}
+}

+/**
+ * fnic_set_eth_mode() - put fnic into ethernet mode.
+ * @fnic: fnic device
+ *
+ * Called without fnic lock held.
+ */
+static void fnic_set_eth_mode(struct fnic *fnic)
+{
+	unsigned long flags;
+	enum fnic_state old_state;
+	int ret;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+again:
 	old_state = fnic->state;
 	switch (old_state) {
 	case FNIC_IN_FC_MODE:
 	case FNIC_IN_ETH_TRANS_FC_MODE:
 	default:
 		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-		vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

-		if (old_flogi) {
-			dev_kfree_skb(fp_skb(old_flogi));
-			old_flogi = NULL;
-		}
-		if (old_flogi_resp) {
-			dev_kfree_skb(fp_skb(old_flogi_resp));
-			old_flogi_resp = NULL;
-		}
-
 		ret = fnic_fw_reset_handler(fnic);

 		spin_lock_irqsave(&fnic->fnic_lock, flags);
 		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
 			goto again;
-		if (ret) {
+		if (ret)
 			fnic->state = old_state;
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			dev_kfree_skb(fp_skb(fp));
-			goto fnic_send_end;
-		}
-		old_flogi = fnic->flogi;
-		fnic->flogi = fp;
-		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
-		old_flogi_resp = fnic->flogi_resp;
-		fnic->flogi_resp = NULL;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		break;

 	case FNIC_IN_FC_TRANS_ETH_MODE:
-		/*
-		 * A reset is pending with the firmware. Store the flogi
-		 * and its oxid. The transition out of this state happens
-		 * only when Firmware completes the reset, either with
-		 * success or failed. If success, transition to
-		 * FNIC_IN_ETH_MODE, if fail, then transition to
-		 * FNIC_IN_FC_MODE
-		 */
-		fnic->flogi = fp;
-		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		break;
-
 	case FNIC_IN_ETH_MODE:
-		/*
-		 * The fw/hw is already in eth mode. Store the oxid,
-		 * and send the flogi frame out. The transition out of this
-		 * state happens only we receive flogi response from the
-		 * network, and the oxid matches the cached oxid when the
-		 * flogi frame was sent out. If they match, then we issue
-		 * a flogi_reg request and transition to state
-		 * FNIC_IN_ETH_TRANS_FC_MODE
-		 */
-		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		ret = fnic_send_frame(fnic, fp);
 		break;
 	}
-
-fnic_send_end:
-	if (old_flogi)
-		dev_kfree_skb(fp_skb(old_flogi));
-	if (old_flogi_resp)
-		dev_kfree_skb(fp_skb(old_flogi_resp));
-	return ret;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 }

 static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
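
The functions this diff adds (fnic_eth_send, fnic_update_mac, fnic_set_port_id, fnic_send and fnic_flush_tx) are entry points that libfc and the shared FIP controller call back into; the registration itself happens outside this file. The sketch below is a rough, hypothetical illustration only — the template and field names are assumptions based on the libfc/libfcoe interfaces of this era and are not part of this diff:

#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include "fnic.h"

/*
 * Hypothetical sketch (not from this patch): how the driver's probe path
 * might hook the fnic_fcs.c handlers into libfc and the FIP controller.
 */
static struct libfc_function_template fnic_transport_template = {
	.frame_send        = fnic_send,         /* queue or transmit FC frames */
	.lport_set_port_id = fnic_set_port_id,  /* called by libfc after FLOGI */
};

static void fnic_fip_setup(struct fnic *fnic, struct fc_lport *lp)
{
	fcoe_ctlr_init(&fnic->ctlr);             /* shared FIP state machine */
	fnic->ctlr.send = fnic_eth_send;         /* raw FIP frame transmit */
	fnic->ctlr.update_mac = fnic_update_mac; /* granted FCoE MAC address */

	lp->tt = fnic_transport_template;        /* libfc callbacks */
	fc_lport_init(lp);
}

With this wiring, received FIP frames flow from fnic_rq_cmpl_frame_recv() through fnic_import_rq_eth_pkt() into fcoe_ctlr_recv(), and fnic_flush_tx() drains the frames that were queued while the firmware was switching between Ethernet and FC mode.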