/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);

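/**
 * fnic_handle_link() - handle link status change.
 * @work:       the link_work entry embedded in struct fnic.
 *
 * Reads the current link state from the firmware, compares it with the
 * previous state (using the link-down counter to detect a quick bounce),
 * and notifies the FCoE controller of link up/down transitions.
 */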
void fnic_handle_link(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, link_work);
        unsigned long flags;
        int old_link_status;
        u32 old_link_down_cnt;

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        old_link_down_cnt = fnic->link_down_cnt;
        old_link_status = fnic->link_status;
        fnic->link_status = vnic_dev_link_status(fnic->vdev);
        fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

        if (old_link_status == fnic->link_status) {
                if (!fnic->link_status)
                        /* DOWN -> DOWN */
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                else {
                        if (old_link_down_cnt != fnic->link_down_cnt) {
                                /* UP -> DOWN -> UP */
                                fnic->lport->host_stats.link_failure_count++;
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
                        } else
                                /* UP -> UP */
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                }
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
                /* UP -> DOWN */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
                fcoe_ctlr_link_down(&fnic->ctlr);
        }
}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, frame_work);
        struct fc_lport *lp = fnic->lport;
        unsigned long flags;
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->frame_queue))) {

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                fp = (struct fc_frame *)skb;

                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fc_exch_recv(lp, fp);
        }
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:       fnic instance.
 * @skb:        Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
        struct fc_frame *fp;
        struct ethhdr *eh;
        struct fcoe_hdr *fcoe_hdr;
        struct fcoe_crc_eof *ft;

        /*
         * Undo VLAN encapsulation if present.
         */
        eh = (struct ethhdr *)skb->data;
        if (eh->h_proto == htons(ETH_P_8021Q)) {
                memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
                eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
                skb_pull(skb, sizeof(*eh));
                fcoe_ctlr_recv(&fnic->ctlr, skb);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
                goto drop;
        skb_set_network_header(skb, sizeof(*eh));
        skb_pull(skb, sizeof(*eh));

        fcoe_hdr = (struct fcoe_hdr *)skb->data;
        if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
                goto drop;

        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_sof(fp) = fcoe_hdr->fcoe_sof;
        skb_pull(skb, sizeof(struct fcoe_hdr));
        skb_reset_transport_header(skb);

        ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
        fr_eof(fp) = ft->fcoe_eof;
        skb_trim(skb, skb->len - sizeof(*ft));
        return 0;
drop:
        dev_kfree_skb_irq(skb);
        return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:       fnic instance.
 * @new:        newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
        u8 *ctl = fnic->ctlr.ctl_src_addr;
        u8 *data = fnic->data_src_addr;

        if (is_zero_ether_addr(new))
                new = ctl;
        if (!compare_ether_addr(data, new))
                return;
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
        if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
                vnic_dev_del_addr(fnic->vdev, data);
        memcpy(data, new, ETH_ALEN);
        if (compare_ether_addr(new, ctl))
                vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:      local port.
 * @new:        newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
        struct fnic *fnic = lport_priv(lport);

        spin_lock_irq(&fnic->fnic_lock);
        fnic_update_mac_locked(fnic, new);
        spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:      local port.
 * @port_id:    assigned FC_ID.
 * @fp:         received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lport);
        u8 *mac;
        int ret;

        FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
                     port_id, fp);

        /*
         * If we're clearing the FC_ID, change to use the ctl_src_addr.
         * Set ethernet mode to send FLOGI.
         */
        if (!port_id) {
                fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
                fnic_set_eth_mode(fnic);
                return;
        }

        if (fp) {
                mac = fr_cb(fp)->granted_mac;
                if (is_zero_ether_addr(mac)) {
                        /* non-FIP - FLOGI already accepted - ignore return */
                        fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
                }
                fnic_update_mac(lport, mac);
        }

        /* Change state to reflect transition to FC mode */
        spin_lock_irq(&fnic->fnic_lock);
        if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
                fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
        else {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unexpected fnic state %s while"
                             " processing flogi resp\n",
                             fnic_state_to_str(fnic->state));
                spin_unlock_irq(&fnic->fnic_lock);
                return;
        }
        spin_unlock_irq(&fnic->fnic_lock);

        /*
         * Send FLOGI registration to firmware to set up FC mode.
         * The new address will be set up when registration completes.
         */
        ret = fnic_flogi_reg_handler(fnic, port_id);

        if (ret < 0) {
                spin_lock_irq(&fnic->fnic_lock);
                if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
                        fnic->state = FNIC_IN_ETH_MODE;
                spin_unlock_irq(&fnic->fnic_lock);
        }
}

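/**
 * fnic_rq_cmpl_frame_recv() - process a received frame completion.
 * @rq:         receive queue the frame arrived on.
 * @cq_desc:    completion descriptor for the frame.
 * @buf:        receive buffer holding the sk_buff.
 * @skipped:    unused.
 * @opaque:     unused.
 *
 * Decodes the FCP or Ethernet completion descriptor, validates the frame,
 * and queues good frames on frame_queue for fnic_handle_frame().
 */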
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
                                    *cq_desc, struct vnic_rq_buf *buf,
                                    int skipped __attribute__((unused)),
                                    void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        struct fc_frame *fp;
        unsigned int eth_hdrs_stripped;
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe = 0, fcoe_sof, fcoe_eof;
        u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
        u8 fcs_ok = 1, packet_error = 0;
        u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
        u32 rss_hash;
        u16 exchange_id, tmpl;
        u8 sof = 0;
        u8 eof = 0;
        u32 fcp_bytes_written = 0;
        unsigned long flags;

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);
        skb = buf->os_buf;
        fp = (struct fc_frame *)skb;
        buf->os_buf = NULL;

        cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
        if (type == CQ_DESC_TYPE_RQ_FCP) {
                cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
                                   &type, &color, &q_number, &completed_index,
                                   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
                                   &tmpl, &fcp_bytes_written, &sof, &eof,
                                   &ingress_port, &packet_error,
                                   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
                                   &vlan);
                eth_hdrs_stripped = 1;
                skb_trim(skb, fcp_bytes_written);
                fr_sof(fp) = sof;
                fr_eof(fp) = eof;

        } else if (type == CQ_DESC_TYPE_RQ_ENET) {
                cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                                    &type, &color, &q_number, &completed_index,
                                    &ingress_port, &fcoe, &eop, &sop,
                                    &rss_type, &csum_not_calc, &rss_hash,
                                    &bytes_written, &packet_error,
                                    &vlan_stripped, &vlan, &checksum,
                                    &fcoe_sof, &fcoe_fc_crc_ok,
                                    &fcoe_enc_error, &fcoe_eof,
                                    &tcp_udp_csum_ok, &udp, &tcp,
                                    &ipv4_csum_ok, &ipv6, &ipv4,
                                    &ipv4_fragment, &fcs_ok);
                eth_hdrs_stripped = 0;
                skb_trim(skb, bytes_written);
                if (!fcs_ok) {
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "fcs error.  dropping packet.\n");
                        goto drop;
                }
                if (fnic_import_rq_eth_pkt(fnic, skb))
                        return;

        } else {
                /* wrong CQ type */
                shost_printk(KERN_ERR, fnic->lport->host,
                             "fnic rq_cmpl wrong cq type x%x\n", type);
                goto drop;
        }

        if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "fnic rq_cmpl fcoe x%x fcsok x%x"
                             " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
                             " x%x\n",
                             fcoe, fcs_ok, packet_error,
                             fcoe_fc_crc_ok, fcoe_enc_error);
                goto drop;
        }

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                goto drop;
        }
        fr_dev(fp) = fnic->lport;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        skb_queue_tail(&fnic->frame_queue, skb);
        queue_work(fnic_event_queue, &fnic->frame_work);

        return;
drop:
        dev_kfree_skb_irq(skb);
}

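/**
 * fnic_rq_cmpl_handler_cont() - per-descriptor RQ completion callback.
 *
 * Hands the completed descriptor to vnic_rq_service(), which calls
 * fnic_rq_cmpl_frame_recv() for the associated receive buffer.
 */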
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);

        vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
                        VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
                        NULL);
        return 0;
}

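/**
 * fnic_rq_cmpl_handler() - service receive queue completions.
 * @fnic:               fnic instance.
 * @rq_work_to_do:      max completions to process per receive queue.
 *
 * Services each RQ completion queue and replenishes receive buffers on
 * queues that had work done. Returns the total number of completions
 * processed.
 */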
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
        unsigned int tot_rq_work_done = 0, cur_work_done;
        unsigned int i;
        int err;

        for (i = 0; i < fnic->rq_count; i++) {
                cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
                                                fnic_rq_cmpl_handler_cont,
                                                NULL);
                if (cur_work_done) {
                        err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
                        if (err)
                                shost_printk(KERN_ERR, fnic->lport->host,
                                             "fnic_alloc_rq_frame can't alloc"
                                             " frame\n");
                }
                tot_rq_work_done += cur_work_done;
        }

        return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        u16 len;
        dma_addr_t pa;

        len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
        skb = dev_alloc_skb(len);
        if (!skb) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unable to allocate RQ sk_buff\n");
                return -ENOMEM;
        }
        skb_reset_mac_header(skb);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        skb_put(skb, len);
        pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
        fnic_queue_rq_desc(rq, skb, pa, len);
        return 0;
}

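/**
 * fnic_free_rq_buf() - free a receive buffer.
 * @rq:         receive queue that owns the buffer.
 * @buf:        buffer to unmap and free.
 *
 * Unmaps the buffer's DMA mapping and frees the underlying sk_buff.
 */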
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(rq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:        fcoe_ctlr instance.
 * @skb:        Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
        struct fnic *fnic = fnic_from_ctlr(fip);
        struct vnic_wq *wq = &fnic->wq[0];
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        unsigned long flags;

        if (!fnic->vlan_hw_insert) {
                eth_hdr = (struct ethhdr *)skb_mac_header(skb);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
                                sizeof(*vlan_hdr) - sizeof(*eth_hdr));
                memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
        }

        pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

        spin_lock_irqsave(&fnic->wq_lock[0], flags);
        if (!vnic_wq_desc_avail(wq)) {
                pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
                spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
                kfree_skb(skb);
                return;
        }

        fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
                               fnic->vlan_hw_insert, fnic->vlan_id, 1);
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
        struct vnic_wq *wq = &fnic->wq[0];
        struct sk_buff *skb;
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        struct fcoe_hdr *fcoe_hdr;
        struct fc_frame_header *fh;
        u32 tot_len, eth_hdr_len;
        int ret = 0;
        unsigned long flags;

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
            fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
                return 0;

        if (!fnic->vlan_hw_insert) {
                eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr = (struct ethhdr *)vlan_hdr;
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
        } else {
                eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
                eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr->h_proto = htons(ETH_P_FCOE);
                fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
        }

        if (fnic->ctlr.map_dest)
                fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
        else
                memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
        memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

        tot_len = skb->len;
        BUG_ON(tot_len % 4);

        memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
        fcoe_hdr->fcoe_sof = fr_sof(fp);
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

        pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

        spin_lock_irqsave(&fnic->wq_lock[0], flags);

        if (!vnic_wq_desc_avail(wq)) {
                pci_unmap_single(fnic->pdev, pa,
                                 tot_len, PCI_DMA_TODEVICE);
                ret = -1;
                goto fnic_send_frame_end;
        }

        fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
                           fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

        if (ret)
                dev_kfree_skb_any(fp_skb(fp));

        return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lp);
        unsigned long flags;

        if (fnic->in_remove) {
                dev_kfree_skb(fp_skb(fp));
                return -1;
        }

        /*
         * Queue frame if in a transitional state.
         * This occurs while registering the Port_ID / MAC address after FLOGI.
         */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
                skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->tx_queue))) {
                fp = (struct fc_frame *)skb;
                fnic_send_frame(fnic, fp);
        }
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
        unsigned long flags;
        enum fnic_state old_state;
        int ret;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
        old_state = fnic->state;
        switch (old_state) {
        case FNIC_IN_FC_MODE:
        case FNIC_IN_ETH_TRANS_FC_MODE:
        default:
                fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                ret = fnic_fw_reset_handler(fnic);

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
                        goto again;
                if (ret)
                        fnic->state = old_state;
                break;

        case FNIC_IN_FC_TRANS_ETH_MODE:
        case FNIC_IN_ETH_MODE:
                break;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

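/**
 * fnic_wq_complete_frame_send() - release a transmitted frame.
 * @wq:         work queue the frame was sent on.
 * @cq_desc:    completion descriptor.
 * @buf:        transmit buffer to unmap and free.
 * @opaque:     unused.
 *
 * Unmaps the DMA mapping and frees the sk_buff of a frame the hardware
 * has finished sending.
 */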
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
                                        struct cq_desc *cq_desc,
                                        struct vnic_wq_buf *buf, void *opaque)
{
        struct sk_buff *skb = buf->os_buf;
        struct fc_frame *fp = (struct fc_frame *)skb;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(fp_skb(fp));
        buf->os_buf = NULL;
}

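/**
 * fnic_wq_cmpl_handler_cont() - per-descriptor WQ completion callback.
 *
 * Services the work queue under wq_lock, invoking
 * fnic_wq_complete_frame_send() for each completed transmit buffer.
 */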
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
        vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
                        fnic_wq_complete_frame_send, NULL);
        spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

        return 0;
}

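/**
 * fnic_wq_cmpl_handler() - service transmit work queue completions.
 * @fnic:       fnic instance.
 * @work_to_do: max completions to process per work queue.
 *
 * Returns the total number of completions processed across all raw
 * work queues.
 */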
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i;

        for (i = 0; i < fnic->raw_wq_count; i++) {
                wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
                                                work_to_do,
                                                fnic_wq_cmpl_handler_cont,
                                                NULL);
        }

        return wq_work_done;
}

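/**
 * fnic_free_wq_buf() - free a transmit buffer.
 * @wq:         work queue that owns the buffer.
 * @buf:        buffer to unmap and free.
 *
 * Unmaps the buffer's DMA mapping and frees the underlying sk_buff.
 */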
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}