Diffstat (limited to 'drivers/scsi/fnic/fnic_fcs.c')
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 500
1 file changed, 239 insertions(+), 261 deletions(-)
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 50db3e36a619..5259888fbfb1 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -17,12 +17,14 @@
17 | */ | 17 | */ |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
22 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
23 | #include <linux/if_ether.h> | 24 | #include <linux/if_ether.h> |
24 | #include <linux/if_vlan.h> | 25 | #include <linux/if_vlan.h> |
25 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <scsi/fc/fc_fip.h> | ||
26 | #include <scsi/fc/fc_els.h> | 28 | #include <scsi/fc/fc_els.h> |
27 | #include <scsi/fc/fc_fcoe.h> | 29 | #include <scsi/fc/fc_fcoe.h> |
28 | #include <scsi/fc_frame.h> | 30 | #include <scsi/fc_frame.h> |
@@ -34,6 +36,8 @@
34 | 36 | ||
35 | struct workqueue_struct *fnic_event_queue; | 37 | struct workqueue_struct *fnic_event_queue; |
36 | 38 | ||
39 | static void fnic_set_eth_mode(struct fnic *); | ||
40 | |||
37 | void fnic_handle_link(struct work_struct *work) | 41 | void fnic_handle_link(struct work_struct *work) |
38 | { | 42 | { |
39 | struct fnic *fnic = container_of(work, struct fnic, link_work); | 43 | struct fnic *fnic = container_of(work, struct fnic, link_work); |
@@ -64,10 +68,10 @@ void fnic_handle_link(struct work_struct *work)
64 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 68 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
65 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 69 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
66 | "link down\n"); | 70 | "link down\n"); |
67 | fc_linkdown(fnic->lport); | 71 | fcoe_ctlr_link_down(&fnic->ctlr); |
68 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 72 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
69 | "link up\n"); | 73 | "link up\n"); |
70 | fc_linkup(fnic->lport); | 74 | fcoe_ctlr_link_up(&fnic->ctlr); |
71 | } else | 75 | } else |
72 | /* UP -> UP */ | 76 | /* UP -> UP */ |
73 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 77 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
@@ -76,13 +80,13 @@ void fnic_handle_link(struct work_struct *work)
76 | /* DOWN -> UP */ | 80 | /* DOWN -> UP */ |
77 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 81 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
78 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); | 82 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); |
79 | fc_linkup(fnic->lport); | 83 | fcoe_ctlr_link_up(&fnic->ctlr); |
80 | } else { | 84 | } else { |
81 | /* UP -> DOWN */ | 85 | /* UP -> DOWN */ |
82 | fnic->lport->host_stats.link_failure_count++; | 86 | fnic->lport->host_stats.link_failure_count++; |
83 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 87 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
84 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); | 88 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); |
85 | fc_linkdown(fnic->lport); | 89 | fcoe_ctlr_link_down(&fnic->ctlr); |
86 | } | 90 | } |
87 | 91 | ||
88 | } | 92 | } |
@@ -107,197 +111,179 @@ void fnic_handle_frame(struct work_struct *work)
107 | return; | 111 | return; |
108 | } | 112 | } |
109 | fp = (struct fc_frame *)skb; | 113 | fp = (struct fc_frame *)skb; |
110 | /* if Flogi resp frame, register the address */ | 114 | |
111 | if (fr_flags(fp)) { | 115 | /* |
112 | vnic_dev_add_addr(fnic->vdev, | 116 | * If we're in a transitional state, just re-queue and return. |
113 | fnic->data_src_addr); | 117 | * The queue will be serviced when we get to a stable state. |
114 | fr_flags(fp) = 0; | 118 | */ |
119 | if (fnic->state != FNIC_IN_FC_MODE && | ||
120 | fnic->state != FNIC_IN_ETH_MODE) { | ||
121 | skb_queue_head(&fnic->frame_queue, skb); | ||
122 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
123 | return; | ||
115 | } | 124 | } |
116 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 125 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
117 | 126 | ||
118 | fc_exch_recv(lp, fp); | 127 | fc_exch_recv(lp, fp); |
119 | } | 128 | } |
120 | |||
121 | } | ||
122 | |||
123 | static inline void fnic_import_rq_fc_frame(struct sk_buff *skb, | ||
124 | u32 len, u8 sof, u8 eof) | ||
125 | { | ||
126 | struct fc_frame *fp = (struct fc_frame *)skb; | ||
127 | |||
128 | skb_trim(skb, len); | ||
129 | fr_eof(fp) = eof; | ||
130 | fr_sof(fp) = sof; | ||
131 | } | 129 | } |
132 | 130 | ||
133 | 131 | /** | |
134 | static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) | 132 | * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. |
133 | * @fnic: fnic instance. | ||
134 | * @skb: Ethernet Frame. | ||
135 | */ | ||
136 | static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) | ||
135 | { | 137 | { |
136 | struct fc_frame *fp; | 138 | struct fc_frame *fp; |
137 | struct ethhdr *eh; | 139 | struct ethhdr *eh; |
138 | struct vlan_ethhdr *vh; | ||
139 | struct fcoe_hdr *fcoe_hdr; | 140 | struct fcoe_hdr *fcoe_hdr; |
140 | struct fcoe_crc_eof *ft; | 141 | struct fcoe_crc_eof *ft; |
141 | u32 transport_len = 0; | ||
142 | 142 | ||
143 | /* | ||
144 | * Undo VLAN encapsulation if present. | ||
145 | */ | ||
143 | eh = (struct ethhdr *)skb->data; | 146 | eh = (struct ethhdr *)skb->data; |
144 | vh = (struct vlan_ethhdr *)skb->data; | 147 | if (eh->h_proto == htons(ETH_P_8021Q)) { |
145 | if (vh->h_vlan_proto == htons(ETH_P_8021Q) && | 148 | memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); |
146 | vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { | 149 | eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); |
147 | skb_pull(skb, sizeof(struct vlan_ethhdr)); | 150 | skb_reset_mac_header(skb); |
148 | transport_len += sizeof(struct vlan_ethhdr); | 151 | } |
149 | } else if (eh->h_proto == htons(ETH_P_FCOE)) { | 152 | if (eh->h_proto == htons(ETH_P_FIP)) { |
150 | transport_len += sizeof(struct ethhdr); | 153 | skb_pull(skb, sizeof(*eh)); |
151 | skb_pull(skb, sizeof(struct ethhdr)); | 154 | fcoe_ctlr_recv(&fnic->ctlr, skb); |
152 | } else | 155 | return 1; /* let caller know packet was used */ |
153 | return -1; | 156 | } |
157 | if (eh->h_proto != htons(ETH_P_FCOE)) | ||
158 | goto drop; | ||
159 | skb_set_network_header(skb, sizeof(*eh)); | ||
160 | skb_pull(skb, sizeof(*eh)); | ||
154 | 161 | ||
155 | fcoe_hdr = (struct fcoe_hdr *)skb->data; | 162 | fcoe_hdr = (struct fcoe_hdr *)skb->data; |
156 | if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) | 163 | if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) |
157 | return -1; | 164 | goto drop; |
158 | 165 | ||
159 | fp = (struct fc_frame *)skb; | 166 | fp = (struct fc_frame *)skb; |
160 | fc_frame_init(fp); | 167 | fc_frame_init(fp); |
161 | fr_sof(fp) = fcoe_hdr->fcoe_sof; | 168 | fr_sof(fp) = fcoe_hdr->fcoe_sof; |
162 | skb_pull(skb, sizeof(struct fcoe_hdr)); | 169 | skb_pull(skb, sizeof(struct fcoe_hdr)); |
163 | transport_len += sizeof(struct fcoe_hdr); | 170 | skb_reset_transport_header(skb); |
164 | 171 | ||
165 | ft = (struct fcoe_crc_eof *)(skb->data + len - | 172 | ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); |
166 | transport_len - sizeof(*ft)); | ||
167 | fr_eof(fp) = ft->fcoe_eof; | 173 | fr_eof(fp) = ft->fcoe_eof; |
168 | skb_trim(skb, len - transport_len - sizeof(*ft)); | 174 | skb_trim(skb, skb->len - sizeof(*ft)); |
169 | return 0; | 175 | return 0; |
176 | drop: | ||
177 | dev_kfree_skb_irq(skb); | ||
178 | return -1; | ||
170 | } | 179 | } |
171 | 180 | ||
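The receive path above reduces to an ethertype dispatch: strip an optional 802.1Q tag, hand ETH_P_FIP frames to fcoe_ctlr_recv(), decapsulate ETH_P_FCOE frames, and drop anything else. A minimal user-space model of that classification, using the standard ethertype values (0x8914 for FIP, 0x8906 for FCoE) and a stand-in struct in place of the kernel's struct ethhdr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() */

#define ETH_ALEN     6
#define ETH_P_8021Q  0x8100
#define ETH_P_FIP    0x8914     /* FIP, FC-BB-5 */
#define ETH_P_FCOE   0x8906

struct eth_hdr {                /* stand-in for the kernel's struct ethhdr */
	uint8_t  dest[ETH_ALEN];
	uint8_t  source[ETH_ALEN];
	uint16_t proto;         /* network byte order */
} __attribute__((packed));

/* Classify a received frame the way fnic_import_rq_eth_pkt() dispatches it. */
const char *classify_rx(const uint8_t *frame, size_t len)
{
	const struct eth_hdr *eh = (const struct eth_hdr *)frame;
	uint16_t proto;

	if (len < sizeof(*eh))
		return "runt: drop";
	proto = ntohs(eh->proto);
	if (proto == ETH_P_8021Q) {             /* VLAN tag: real type follows the TCI */
		if (len < sizeof(*eh) + 4)
			return "runt: drop";
		memcpy(&proto, frame + sizeof(*eh) + 2, sizeof(proto));
		proto = ntohs(proto);
	}
	if (proto == ETH_P_FIP)
		return "FIP: pass to fcoe_ctlr_recv()";
	if (proto == ETH_P_FCOE)
		return "FCoE: strip headers, check version, fc_exch_recv()";
	return "unknown ethertype: drop";
}

int main(void)
{
	uint8_t frame[64] = { 0 };

	frame[12] = 0x89;               /* ethertype 0x8914 (FIP) */
	frame[13] = 0x14;
	puts(classify_rx(frame, sizeof(frame)));
	return 0;
}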
172 | static inline int fnic_handle_flogi_resp(struct fnic *fnic, | 181 | /** |
173 | struct fc_frame *fp) | 182 | * fnic_update_mac_locked() - set data MAC address and filters. |
183 | * @fnic: fnic instance. | ||
184 | * @new: newly-assigned FCoE MAC address. | ||
185 | * | ||
186 | * Called with the fnic lock held. | ||
187 | */ | ||
188 | void fnic_update_mac_locked(struct fnic *fnic, u8 *new) | ||
174 | { | 189 | { |
175 | u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; | 190 | u8 *ctl = fnic->ctlr.ctl_src_addr; |
176 | struct ethhdr *eth_hdr; | 191 | u8 *data = fnic->data_src_addr; |
177 | struct fc_frame_header *fh; | ||
178 | int ret = 0; | ||
179 | unsigned long flags; | ||
180 | struct fc_frame *old_flogi_resp = NULL; | ||
181 | 192 | ||
182 | fh = (struct fc_frame_header *)fr_hdr(fp); | 193 | if (is_zero_ether_addr(new)) |
194 | new = ctl; | ||
195 | if (!compare_ether_addr(data, new)) | ||
196 | return; | ||
197 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); | ||
198 | if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) | ||
199 | vnic_dev_del_addr(fnic->vdev, data); | ||
200 | memcpy(data, new, ETH_ALEN); | ||
201 | if (compare_ether_addr(new, ctl)) | ||
202 | vnic_dev_add_addr(fnic->vdev, new); | ||
203 | } | ||
183 | 204 | ||
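fnic_update_mac_locked() above follows a few fixed rules: an all-zero address means "fall back to the FIP control MAC", an unchanged address is a no-op, and the unicast filter (vnic_dev_del_addr()/vnic_dev_add_addr()) is only touched for addresses that differ from the control MAC, which stays programmed at all times. A self-contained sketch of that decision logic, with printf stubs standing in for the vnic_dev_* filter calls and arbitrary example addresses in main():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_zero(const uint8_t *a)
{
	static const uint8_t z[ETH_ALEN];
	return memcmp(a, z, ETH_ALEN) == 0;
}

/* stand-ins for vnic_dev_add_addr()/vnic_dev_del_addr() */
static void filter_add(const uint8_t *a) { printf("add %02x:..:%02x\n", a[0], a[5]); }
static void filter_del(const uint8_t *a) { printf("del %02x:..:%02x\n", a[0], a[5]); }

/*
 * Mirror of the update rules in fnic_update_mac_locked():
 *   ctl  - FIP control source MAC (always programmed)
 *   data - current FCoE data source MAC (updated in place)
 *   newm - newly granted MAC, or all zeros to fall back to ctl
 */
static void update_data_mac(const uint8_t *ctl, uint8_t *data, const uint8_t *newm)
{
	if (is_zero(newm))
		newm = ctl;                       /* no granted MAC: reuse control MAC */
	if (memcmp(data, newm, ETH_ALEN) == 0)
		return;                           /* unchanged: nothing to do */
	if (!is_zero(data) && memcmp(data, ctl, ETH_ALEN) != 0)
		filter_del(data);                 /* drop the old unicast filter entry */
	memcpy(data, newm, ETH_ALEN);
	if (memcmp(newm, ctl, ETH_ALEN) != 0)
		filter_add(newm);                 /* program the new unicast filter entry */
}

int main(void)
{
	uint8_t ctl[ETH_ALEN]  = { 0x00, 0x25, 0xb5, 0x00, 0x00, 0x01 };
	uint8_t data[ETH_ALEN] = { 0 };
	uint8_t fpma[ETH_ALEN] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };

	update_data_mac(ctl, data, fpma);                   /* FLOGI granted a MAC */
	update_data_mac(ctl, data, (uint8_t[ETH_ALEN]){0}); /* logged off: back to ctl */
	return 0;
}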
184 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 205 | /** |
206 | * fnic_update_mac() - set data MAC address and filters. | ||
207 | * @lport: local port. | ||
208 | * @new: newly-assigned FCoE MAC address. | ||
209 | */ | ||
210 | void fnic_update_mac(struct fc_lport *lport, u8 *new) | ||
211 | { | ||
212 | struct fnic *fnic = lport_priv(lport); | ||
185 | 213 | ||
186 | if (fnic->state == FNIC_IN_ETH_MODE) { | 214 | spin_lock_irq(&fnic->fnic_lock); |
215 | fnic_update_mac_locked(fnic, new); | ||
216 | spin_unlock_irq(&fnic->fnic_lock); | ||
217 | } | ||
187 | 218 | ||
188 | /* | 219 | /** |
189 | * Check if oxid matches on taking the lock. A new Flogi | 220 | * fnic_set_port_id() - set the port_ID after successful FLOGI. |
190 | * issued by libFC might have changed the fnic cached oxid | 221 | * @lport: local port. |
191 | */ | 222 | * @port_id: assigned FC_ID. |
192 | if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { | 223 | * @fp: received frame containing the FLOGI accept or NULL. |
193 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 224 | * |
194 | "Flogi response oxid not" | 225 | * This is called from libfc when a new FC_ID has been assigned. |
195 | " matching cached oxid, dropping frame" | 226 | * This causes us to reset the firmware to FC_MODE and setup the new MAC |
196 | "\n"); | 227 | * address and FC_ID. |
197 | ret = -1; | 228 | * |
198 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 229 | * It is also called with FC_ID 0 when we're logged off. |
199 | dev_kfree_skb_irq(fp_skb(fp)); | 230 | * |
200 | goto handle_flogi_resp_end; | 231 | * If the FC_ID is due to point-to-point, fp may be NULL. |
201 | } | 232 | */ |
233 | void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) | ||
234 | { | ||
235 | struct fnic *fnic = lport_priv(lport); | ||
236 | u8 *mac; | ||
237 | int ret; | ||
202 | 238 | ||
203 | /* Drop older cached flogi response frame, cache this frame */ | 239 | FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", |
204 | old_flogi_resp = fnic->flogi_resp; | 240 | port_id, fp); |
205 | fnic->flogi_resp = fp; | ||
206 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
207 | 241 | ||
208 | /* | 242 | /* |
209 | * this frame is part of flogi get the src mac addr from this | 243 | * If we're clearing the FC_ID, change to use the ctl_src_addr. |
210 | * frame if the src mac is fcoui based then we mark the | 244 | * Set ethernet mode to send FLOGI. |
211 | * address mode flag to use fcoui base for dst mac addr | 245 | */ |
212 | * otherwise we have to store the fcoe gateway addr | 246 | if (!port_id) { |
213 | */ | 247 | fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); |
214 | eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); | 248 | fnic_set_eth_mode(fnic); |
215 | memcpy(mac, eth_hdr->h_source, ETH_ALEN); | 249 | return; |
250 | } | ||
216 | 251 | ||
217 | if (ntoh24(mac) == FC_FCOE_OUI) | 252 | if (fp) { |
218 | fnic->fcoui_mode = 1; | 253 | mac = fr_cb(fp)->granted_mac; |
219 | else { | 254 | if (is_zero_ether_addr(mac)) { |
220 | fnic->fcoui_mode = 0; | 255 | /* non-FIP - FLOGI already accepted - ignore return */ |
221 | memcpy(fnic->dest_addr, mac, ETH_ALEN); | 256 | fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); |
222 | } | 257 | } |
258 | fnic_update_mac(lport, mac); | ||
259 | } | ||
223 | 260 | ||
224 | /* | 261 | /* Change state to reflect transition to FC mode */ |
225 | * Except for Flogi frame, all outbound frames from us have the | 262 | spin_lock_irq(&fnic->fnic_lock); |
226 | * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses | 263 | if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) |
227 | * the vnic MAC address as the Eth Src address | ||
228 | */ | ||
229 | fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id); | ||
230 | |||
231 | /* We get our s_id from the d_id of the flogi resp frame */ | ||
232 | fnic->s_id = ntoh24(fh->fh_d_id); | ||
233 | |||
234 | /* Change state to reflect transition from Eth to FC mode */ | ||
235 | fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; | 264 | fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; |
236 | 265 | else { | |
237 | } else { | ||
238 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | 266 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, |
239 | "Unexpected fnic state %s while" | 267 | "Unexpected fnic state %s while" |
240 | " processing flogi resp\n", | 268 | " processing flogi resp\n", |
241 | fnic_state_to_str(fnic->state)); | 269 | fnic_state_to_str(fnic->state)); |
242 | ret = -1; | 270 | spin_unlock_irq(&fnic->fnic_lock); |
243 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 271 | return; |
244 | dev_kfree_skb_irq(fp_skb(fp)); | ||
245 | goto handle_flogi_resp_end; | ||
246 | } | 272 | } |
247 | 273 | spin_unlock_irq(&fnic->fnic_lock); | |
248 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
249 | |||
250 | /* Drop older cached frame */ | ||
251 | if (old_flogi_resp) | ||
252 | dev_kfree_skb_irq(fp_skb(old_flogi_resp)); | ||
253 | 274 | ||
254 | /* | 275 | /* |
255 | * send flogi reg request to firmware, this will put the fnic in | 276 | * Send FLOGI registration to firmware to set up FC mode. |
256 | * in FC mode | 277 | * The new address will be set up when registration completes. |
257 | */ | 278 | */ |
258 | ret = fnic_flogi_reg_handler(fnic); | 279 | ret = fnic_flogi_reg_handler(fnic, port_id); |
259 | 280 | ||
260 | if (ret < 0) { | 281 | if (ret < 0) { |
261 | int free_fp = 1; | 282 | spin_lock_irq(&fnic->fnic_lock); |
262 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
263 | /* | ||
264 | * free the frame is some other thread is not | ||
265 | * pointing to it | ||
266 | */ | ||
267 | if (fnic->flogi_resp != fp) | ||
268 | free_fp = 0; | ||
269 | else | ||
270 | fnic->flogi_resp = NULL; | ||
271 | |||
272 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) | 283 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) |
273 | fnic->state = FNIC_IN_ETH_MODE; | 284 | fnic->state = FNIC_IN_ETH_MODE; |
274 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 285 | spin_unlock_irq(&fnic->fnic_lock); |
275 | if (free_fp) | ||
276 | dev_kfree_skb_irq(fp_skb(fp)); | ||
277 | } | 286 | } |
278 | |||
279 | handle_flogi_resp_end: | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | /* Returns 1 for a response that matches cached flogi oxid */ | ||
284 | static inline int is_matching_flogi_resp_frame(struct fnic *fnic, | ||
285 | struct fc_frame *fp) | ||
286 | { | ||
287 | struct fc_frame_header *fh; | ||
288 | int ret = 0; | ||
289 | u32 f_ctl; | ||
290 | |||
291 | fh = fc_frame_header_get(fp); | ||
292 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
293 | |||
294 | if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) && | ||
295 | fh->fh_r_ctl == FC_RCTL_ELS_REP && | ||
296 | (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX && | ||
297 | fh->fh_type == FC_TYPE_ELS) | ||
298 | ret = 1; | ||
299 | |||
300 | return ret; | ||
301 | } | 287 | } |
302 | 288 | ||
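fnic_set_port_id() above distinguishes a few cases: a zero port_id (logged off: revert to the control MAC and Ethernet mode), a FLOGI response carrying a FIP-granted MAC, a response whose granted MAC is all zeros (non-FIP fabric, handed to fcoe_ctlr_recv_flogi()), and no frame at all (point-to-point), after which the driver moves to FNIC_IN_ETH_TRANS_FC_MODE and issues fnic_flogi_reg_handler(). A condensed, self-contained model of just that classification (the enum and helper names are ours, for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

enum port_id_action {
	REVERT_TO_ETH_MODE,     /* port_id == 0: use ctl MAC, resend FLOGI */
	KEEP_CURRENT_MAC,       /* point-to-point: no FLOGI response frame */
	NON_FIP_FLOGI,          /* granted MAC all zeros: legacy FCoE path  */
	USE_GRANTED_MAC,        /* FIP granted a fabric-provided MAC        */
};

/* Condensed version of the decision made at the top of fnic_set_port_id(). */
enum port_id_action classify_port_id(uint32_t port_id, bool have_frame,
				     const uint8_t granted_mac[ETH_ALEN])
{
	static const uint8_t zero[ETH_ALEN];

	if (!port_id)
		return REVERT_TO_ETH_MODE;
	if (!have_frame)
		return KEEP_CURRENT_MAC;
	if (memcmp(granted_mac, zero, ETH_ALEN) == 0)
		return NON_FIP_FLOGI;
	return USE_GRANTED_MAC;
}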
303 | static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | 289 | static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc |
@@ -326,6 +312,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
326 | pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, | 312 | pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, |
327 | PCI_DMA_FROMDEVICE); | 313 | PCI_DMA_FROMDEVICE); |
328 | skb = buf->os_buf; | 314 | skb = buf->os_buf; |
315 | fp = (struct fc_frame *)skb; | ||
329 | buf->os_buf = NULL; | 316 | buf->os_buf = NULL; |
330 | 317 | ||
331 | cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); | 318 | cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); |
@@ -338,6 +325,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
338 | &fcoe_enc_error, &fcs_ok, &vlan_stripped, | 325 | &fcoe_enc_error, &fcs_ok, &vlan_stripped, |
339 | &vlan); | 326 | &vlan); |
340 | eth_hdrs_stripped = 1; | 327 | eth_hdrs_stripped = 1; |
328 | skb_trim(skb, fcp_bytes_written); | ||
329 | fr_sof(fp) = sof; | ||
330 | fr_eof(fp) = eof; | ||
341 | 331 | ||
342 | } else if (type == CQ_DESC_TYPE_RQ_ENET) { | 332 | } else if (type == CQ_DESC_TYPE_RQ_ENET) { |
343 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, | 333 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, |
@@ -352,6 +342,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
352 | &ipv4_csum_ok, &ipv6, &ipv4, | 342 | &ipv4_csum_ok, &ipv6, &ipv4, |
353 | &ipv4_fragment, &fcs_ok); | 343 | &ipv4_fragment, &fcs_ok); |
354 | eth_hdrs_stripped = 0; | 344 | eth_hdrs_stripped = 0; |
345 | skb_trim(skb, bytes_written); | ||
346 | if (!fcs_ok) { | ||
347 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
348 | "fcs error. dropping packet.\n"); | ||
349 | goto drop; | ||
350 | } | ||
351 | if (fnic_import_rq_eth_pkt(fnic, skb)) | ||
352 | return; | ||
355 | 353 | ||
356 | } else { | 354 | } else { |
357 | /* wrong CQ type*/ | 355 | /* wrong CQ type*/ |
@@ -370,43 +368,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
370 | goto drop; | 368 | goto drop; |
371 | } | 369 | } |
372 | 370 | ||
373 | if (eth_hdrs_stripped) | ||
374 | fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof); | ||
375 | else if (fnic_import_rq_eth_pkt(skb, bytes_written)) | ||
376 | goto drop; | ||
377 | |||
378 | fp = (struct fc_frame *)skb; | ||
379 | |||
380 | /* | ||
381 | * If frame is an ELS response that matches the cached FLOGI OX_ID, | ||
382 | * and is accept, issue flogi_reg_request copy wq request to firmware | ||
383 | * to register the S_ID and determine whether FC_OUI mode or GW mode. | ||
384 | */ | ||
385 | if (is_matching_flogi_resp_frame(fnic, fp)) { | ||
386 | if (!eth_hdrs_stripped) { | ||
387 | if (fc_frame_payload_op(fp) == ELS_LS_ACC) { | ||
388 | fnic_handle_flogi_resp(fnic, fp); | ||
389 | return; | ||
390 | } | ||
391 | /* | ||
392 | * Recd. Flogi reject. No point registering | ||
393 | * with fw, but forward to libFC | ||
394 | */ | ||
395 | goto forward; | ||
396 | } | ||
397 | goto drop; | ||
398 | } | ||
399 | if (!eth_hdrs_stripped) | ||
400 | goto drop; | ||
401 | |||
402 | forward: | ||
403 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 371 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
404 | if (fnic->stop_rx_link_events) { | 372 | if (fnic->stop_rx_link_events) { |
405 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 373 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
406 | goto drop; | 374 | goto drop; |
407 | } | 375 | } |
408 | /* Use fr_flags to indicate whether succ. flogi resp or not */ | ||
409 | fr_flags(fp) = 0; | ||
410 | fr_dev(fp) = fnic->lport; | 376 | fr_dev(fp) = fnic->lport; |
411 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 377 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
412 | 378 | ||
@@ -494,12 +460,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
494 | buf->os_buf = NULL; | 460 | buf->os_buf = NULL; |
495 | } | 461 | } |
496 | 462 | ||
497 | static inline int is_flogi_frame(struct fc_frame_header *fh) | 463 | /** |
464 | * fnic_eth_send() - Send Ethernet frame. | ||
465 | * @fip: fcoe_ctlr instance. | ||
466 | * @skb: Ethernet Frame, FIP, without VLAN encapsulation. | ||
467 | */ | ||
468 | void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) | ||
498 | { | 469 | { |
499 | return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; | 470 | struct fnic *fnic = fnic_from_ctlr(fip); |
471 | struct vnic_wq *wq = &fnic->wq[0]; | ||
472 | dma_addr_t pa; | ||
473 | struct ethhdr *eth_hdr; | ||
474 | struct vlan_ethhdr *vlan_hdr; | ||
475 | unsigned long flags; | ||
476 | |||
477 | if (!fnic->vlan_hw_insert) { | ||
478 | eth_hdr = (struct ethhdr *)skb_mac_header(skb); | ||
479 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, | ||
480 | sizeof(*vlan_hdr) - sizeof(*eth_hdr)); | ||
481 | memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); | ||
482 | vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); | ||
483 | vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; | ||
484 | vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); | ||
485 | } | ||
486 | |||
487 | pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | ||
488 | |||
489 | spin_lock_irqsave(&fnic->wq_lock[0], flags); | ||
490 | if (!vnic_wq_desc_avail(wq)) { | ||
491 | pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE); | ||
492 | spin_unlock_irqrestore(&fnic->wq_lock[0], flags); | ||
493 | kfree_skb(skb); | ||
494 | return; | ||
495 | } | ||
496 | |||
497 | fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, | ||
498 | fnic->vlan_hw_insert, fnic->vlan_id, 1); | ||
499 | spin_unlock_irqrestore(&fnic->wq_lock[0], flags); | ||
500 | } | 500 | } |
501 | 501 | ||
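When vlan_hw_insert is clear, fnic_eth_send() above builds the 802.1Q header in software: push VLAN_HLEN (4) bytes of headroom, slide the two MAC addresses down, and write the TPID and TCI in front of the original ethertype. The same transformation on a flat buffer, as a rough sketch (the function name is ours):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* htons() */

#define ETH_ALEN     6
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100

/*
 * Insert an 802.1Q tag into an untagged frame that has VLAN_HLEN bytes of
 * headroom in front of it (the skb_push() step in fnic_eth_send()).
 * Before: dst(6) src(6) type(2) payload...
 * After:  dst(6) src(6) 8100 TCI type(2) payload...
 * Returns the new start of the frame.
 */
uint8_t *vlan_insert(uint8_t *frame, uint16_t vlan_id)
{
	uint8_t *start = frame - VLAN_HLEN;
	uint16_t tpid = htons(ETH_P_8021Q);
	uint16_t tci = htons(vlan_id & 0x0fff);    /* priority 0, CFI 0 */

	memmove(start, frame, 2 * ETH_ALEN);       /* slide MACs into the headroom */
	memcpy(start + 2 * ETH_ALEN, &tpid, sizeof(tpid));
	memcpy(start + 2 * ETH_ALEN + 2, &tci, sizeof(tci));
	/* the original ethertype now sits immediately after the tag */
	return start;
}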
502 | int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | 502 | /* |
503 | * Send FC frame. | ||
504 | */ | ||
505 | static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | ||
503 | { | 506 | { |
504 | struct vnic_wq *wq = &fnic->wq[0]; | 507 | struct vnic_wq *wq = &fnic->wq[0]; |
505 | struct sk_buff *skb; | 508 | struct sk_buff *skb; |
@@ -515,6 +518,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
515 | fh = fc_frame_header_get(fp); | 518 | fh = fc_frame_header_get(fp); |
516 | skb = fp_skb(fp); | 519 | skb = fp_skb(fp); |
517 | 520 | ||
521 | if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && | ||
522 | fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) | ||
523 | return 0; | ||
524 | |||
518 | if (!fnic->vlan_hw_insert) { | 525 | if (!fnic->vlan_hw_insert) { |
519 | eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); | 526 | eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); |
520 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); | 527 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); |
@@ -530,16 +537,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
530 | fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); | 537 | fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); |
531 | } | 538 | } |
532 | 539 | ||
533 | if (is_flogi_frame(fh)) { | 540 | if (fnic->ctlr.map_dest) |
534 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); | 541 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); |
535 | memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); | 542 | else |
536 | } else { | 543 | memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); |
537 | if (fnic->fcoui_mode) | 544 | memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); |
538 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); | ||
539 | else | ||
540 | memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN); | ||
541 | memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); | ||
542 | } | ||
543 | 545 | ||
544 | tot_len = skb->len; | 546 | tot_len = skb->len; |
545 | BUG_ON(tot_len % 4); | 547 | BUG_ON(tot_len % 4); |
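The destination-MAC selection above hinges on ctlr.map_dest: when a FIP-discovered FCF is in use the frame is sent to ctlr.dest_addr, otherwise fc_fcoe_set_mac() derives a "mapped" destination from the FC-FCoE OUI 0E:FC:00 plus the 24-bit destination FC_ID. A small self-contained sketch of that mapping (the helper name is ours, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/*
 * Build a mapped FCoE destination MAC: 0E:FC:00 (FC_FCOE_OUI) followed by
 * the 24-bit destination FC_ID, as fc_fcoe_set_mac() does in the kernel.
 */
void fcoe_mapped_mac(uint8_t mac[ETH_ALEN], uint32_t fc_id)
{
	mac[0] = 0x0e;
	mac[1] = 0xfc;
	mac[2] = 0x00;
	mac[3] = (fc_id >> 16) & 0xff;
	mac[4] = (fc_id >> 8) & 0xff;
	mac[5] = fc_id & 0xff;
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	fcoe_mapped_mac(mac, 0xfffffe);   /* well-known fabric login FC_ID */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}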
@@ -578,109 +580,85 @@ fnic_send_frame_end:
578 | int fnic_send(struct fc_lport *lp, struct fc_frame *fp) | 580 | int fnic_send(struct fc_lport *lp, struct fc_frame *fp) |
579 | { | 581 | { |
580 | struct fnic *fnic = lport_priv(lp); | 582 | struct fnic *fnic = lport_priv(lp); |
581 | struct fc_frame_header *fh; | ||
582 | int ret = 0; | ||
583 | enum fnic_state old_state; | ||
584 | unsigned long flags; | 583 | unsigned long flags; |
585 | struct fc_frame *old_flogi = NULL; | ||
586 | struct fc_frame *old_flogi_resp = NULL; | ||
587 | 584 | ||
588 | if (fnic->in_remove) { | 585 | if (fnic->in_remove) { |
589 | dev_kfree_skb(fp_skb(fp)); | 586 | dev_kfree_skb(fp_skb(fp)); |
590 | ret = -1; | 587 | return -1; |
591 | goto fnic_send_end; | ||
592 | } | 588 | } |
593 | 589 | ||
594 | fh = fc_frame_header_get(fp); | 590 | /* |
595 | /* if not an Flogi frame, send it out, this is the common case */ | 591 | * Queue frame if in a transitional state. |
596 | if (!is_flogi_frame(fh)) | 592 | * This occurs while registering the Port_ID / MAC address after FLOGI. |
597 | return fnic_send_frame(fnic, fp); | 593 | */ |
594 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
595 | if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { | ||
596 | skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); | ||
597 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
598 | return 0; | ||
599 | } | ||
600 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
598 | 601 | ||
599 | /* Flogi frame, now enter the state machine */ | 602 | return fnic_send_frame(fnic, fp); |
603 | } | ||
600 | 604 | ||
601 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 605 | /** |
602 | again: | 606 | * fnic_flush_tx() - send queued frames. |
603 | /* Get any old cached frames, free them after dropping lock */ | 607 | * @fnic: fnic device |
604 | old_flogi = fnic->flogi; | 608 | * |
605 | fnic->flogi = NULL; | 609 | * Send frames that were waiting to go out in FC or Ethernet mode. |
606 | old_flogi_resp = fnic->flogi_resp; | 610 | * Whenever changing modes we purge queued frames, so these frames should |
607 | fnic->flogi_resp = NULL; | 611 | * be queued for the stable mode that we're in, either FC or Ethernet. |
612 | * | ||
613 | * Called without fnic_lock held. | ||
614 | */ | ||
615 | void fnic_flush_tx(struct fnic *fnic) | ||
616 | { | ||
617 | struct sk_buff *skb; | ||
618 | struct fc_frame *fp; | ||
608 | 619 | ||
609 | fnic->flogi_oxid = FC_XID_UNKNOWN; | 620 | while ((skb = skb_dequeue(&fnic->frame_queue))) { |
621 | fp = (struct fc_frame *)skb; | ||
622 | fnic_send_frame(fnic, fp); | ||
623 | } | ||
624 | } | ||
610 | 625 | ||
626 | /** | ||
627 | * fnic_set_eth_mode() - put fnic into ethernet mode. | ||
628 | * @fnic: fnic device | ||
629 | * | ||
630 | * Called without fnic lock held. | ||
631 | */ | ||
632 | static void fnic_set_eth_mode(struct fnic *fnic) | ||
633 | { | ||
634 | unsigned long flags; | ||
635 | enum fnic_state old_state; | ||
636 | int ret; | ||
637 | |||
638 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
639 | again: | ||
611 | old_state = fnic->state; | 640 | old_state = fnic->state; |
612 | switch (old_state) { | 641 | switch (old_state) { |
613 | case FNIC_IN_FC_MODE: | 642 | case FNIC_IN_FC_MODE: |
614 | case FNIC_IN_ETH_TRANS_FC_MODE: | 643 | case FNIC_IN_ETH_TRANS_FC_MODE: |
615 | default: | 644 | default: |
616 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | 645 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
617 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | ||
618 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | 646 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
619 | 647 | ||
620 | if (old_flogi) { | ||
621 | dev_kfree_skb(fp_skb(old_flogi)); | ||
622 | old_flogi = NULL; | ||
623 | } | ||
624 | if (old_flogi_resp) { | ||
625 | dev_kfree_skb(fp_skb(old_flogi_resp)); | ||
626 | old_flogi_resp = NULL; | ||
627 | } | ||
628 | |||
629 | ret = fnic_fw_reset_handler(fnic); | 648 | ret = fnic_fw_reset_handler(fnic); |
630 | 649 | ||
631 | spin_lock_irqsave(&fnic->fnic_lock, flags); | 650 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
632 | if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) | 651 | if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) |
633 | goto again; | 652 | goto again; |
634 | if (ret) { | 653 | if (ret) |
635 | fnic->state = old_state; | 654 | fnic->state = old_state; |
636 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
637 | dev_kfree_skb(fp_skb(fp)); | ||
638 | goto fnic_send_end; | ||
639 | } | ||
640 | old_flogi = fnic->flogi; | ||
641 | fnic->flogi = fp; | ||
642 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
643 | old_flogi_resp = fnic->flogi_resp; | ||
644 | fnic->flogi_resp = NULL; | ||
645 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
646 | break; | 655 | break; |
647 | 656 | ||
648 | case FNIC_IN_FC_TRANS_ETH_MODE: | 657 | case FNIC_IN_FC_TRANS_ETH_MODE: |
649 | /* | ||
650 | * A reset is pending with the firmware. Store the flogi | ||
651 | * and its oxid. The transition out of this state happens | ||
652 | * only when Firmware completes the reset, either with | ||
653 | * success or failed. If success, transition to | ||
654 | * FNIC_IN_ETH_MODE, if fail, then transition to | ||
655 | * FNIC_IN_FC_MODE | ||
656 | */ | ||
657 | fnic->flogi = fp; | ||
658 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
659 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
660 | break; | ||
661 | |||
662 | case FNIC_IN_ETH_MODE: | 658 | case FNIC_IN_ETH_MODE: |
663 | /* | ||
664 | * The fw/hw is already in eth mode. Store the oxid, | ||
665 | * and send the flogi frame out. The transition out of this | ||
666 | * state happens only we receive flogi response from the | ||
667 | * network, and the oxid matches the cached oxid when the | ||
668 | * flogi frame was sent out. If they match, then we issue | ||
669 | * a flogi_reg request and transition to state | ||
670 | * FNIC_IN_ETH_TRANS_FC_MODE | ||
671 | */ | ||
672 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
673 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
674 | ret = fnic_send_frame(fnic, fp); | ||
675 | break; | 659 | break; |
676 | } | 660 | } |
677 | 661 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | |
678 | fnic_send_end: | ||
679 | if (old_flogi) | ||
680 | dev_kfree_skb(fp_skb(old_flogi)); | ||
681 | if (old_flogi_resp) | ||
682 | dev_kfree_skb(fp_skb(old_flogi_resp)); | ||
683 | return ret; | ||
684 | } | 662 | } |
685 | 663 | ||
686 | static void fnic_wq_complete_frame_send(struct vnic_wq *wq, | 664 | static void fnic_wq_complete_frame_send(struct vnic_wq *wq, |