/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
                                                struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *conn_destroy);

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
        struct fcoe_kwqe_stat stat_req;
        struct kwqe *kwqe_arr[2];
        int num_kwqes = 1;
        int rc = 0;

        memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
        stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
        stat_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

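        /*
         * KWQEs carry 64-bit DMA addresses as two 32-bit halves; the
         * same lo/hi split convention is used throughout this file.
         */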
        stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
        stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

        kwqe_arr[0] = (struct kwqe *) &stat_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba:        adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 *      with the f/w.
 *
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
        struct fcoe_kwqe_init1 fcoe_init1;
        struct fcoe_kwqe_init2 fcoe_init2;
        struct fcoe_kwqe_init3 fcoe_init3;
        struct kwqe *kwqe_arr[3];
        int num_kwqes = 3;
        int rc = 0;

        if (!hba->cnic) {
                printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
                return -ENODEV;
        }

        /* fill init1 KWQE */
        memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
        fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
        fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
        fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
        fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
        fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
        fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
        fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
        fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
        fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
        fcoe_init1.task_list_pbl_addr_hi =
                                (u32) ((u64) hba->task_ctx_bd_dma >> 32);
        fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

        fcoe_init1.flags = (PAGE_SHIFT <<
                                FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

        fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

        /* fill init2 KWQE */
        memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
        fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
        fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
        fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

        fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
        fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
                                           ((u64) hba->hash_tbl_pbl_dma >> 32);

        fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
        fcoe_init2.t2_hash_tbl_addr_hi = (u32)
                                          ((u64) hba->t2_hash_tbl_dma >> 32);

        fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
        fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
                                        ((u64) hba->t2_hash_tbl_ptr_dma >> 32);

        fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

        /* fill init3 KWQE */
        memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
        fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
        fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
        fcoe_init3.error_bit_map_lo = 0xffffffff;
        fcoe_init3.error_bit_map_hi = 0xffffffff;

        fcoe_init3.perf_config = 1;

        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
        kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
        struct fcoe_kwqe_destroy fcoe_destroy;
        struct kwqe *kwqe_arr[2];
        int num_kwqes = 1;
        int rc = -1;

        /* fill destroy KWQE */
        memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
        fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
        fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
        kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
        return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:               port structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt)
{
        struct fc_lport *lport = port->lport;
        struct bnx2fc_hba *hba = port->priv;
        struct kwqe *kwqe_arr[4];
        struct fcoe_kwqe_conn_offload1 ofld_req1;
        struct fcoe_kwqe_conn_offload2 ofld_req2;
        struct fcoe_kwqe_conn_offload3 ofld_req3;
        struct fcoe_kwqe_conn_offload4 ofld_req4;
        struct fc_rport_priv *rdata = tgt->rdata;
        struct fc_rport *rport = tgt->rport;
        int num_kwqes = 4;
        u32 port_id;
        int rc = 0;
        u16 conn_id;

        /* Initialize offload request 1 structure */
        memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

        ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
        ofld_req1.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        conn_id = (u16)tgt->fcoe_conn_id;
        ofld_req1.fcoe_conn_id = conn_id;

        ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
        ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

        ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
        ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

        ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
        ofld_req1.rq_first_pbe_addr_hi =
                                (u32)((u64) tgt->rq_dma >> 32);

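        /*
         * Bit 15 of the RQ producer index acts as a wrap sequence bit
         * (the ring holds fewer than 0x8000 entries), so the initial
         * producer value is 0x8000 with the index bits cleared; see the
         * matching wrap logic in bnx2fc_return_rqe().
         */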
        ofld_req1.rq_prod = 0x8000;

        /* Initialize offload request 2 structure */
        memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

        ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
        ofld_req2.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

        ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
        ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

        ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
        ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

        ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
        ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

        /* Initialize offload request 3 structure */
        memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

        ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
        ofld_req3.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        ofld_req3.vlan_tag = hba->vlan_id <<
                                FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
        ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

        port_id = fc_host_port_id(lport->host);
        if (port_id == 0) {
                BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
                return -EINVAL;
        }

        /*
         * Store the s_id of the initiator for later reference. It is
         * needed during disable/destroy processing on link-down, since
         * the port_id is reset to 0 when the lport is reset.
         */
        tgt->sid = port_id;
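        /* FC IDs are 24 bits; hand them to the firmware byte-wise, LSB first */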
        ofld_req3.s_id[0] = (port_id & 0x000000FF);
        ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
        ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

        port_id = rport->port_id;
        ofld_req3.d_id[0] = (port_id & 0x000000FF);
        ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
        ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

        ofld_req3.tx_total_conc_seqs = rdata->max_seq;

        ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
        ofld_req3.rx_max_fc_pay_len = lport->mfs;

        ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
        ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
        ofld_req3.rx_open_seqs_exch_c3 = 1;

        ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
        ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

        /* set mul_n_port_ids supported flag to 0, until it is supported */
        ofld_req3.flags = 0;
        /*
        ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
                            FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
        */
        /* Info from PLOGI response */
        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
                             FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
                             FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

        /* vlan flag */
        ofld_req3.flags |= (hba->vlan_enabled <<
                            FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

        /* C2_VALID and ACK flags are not set as they are not supported */

        /* Initialize offload request 4 structure */
        memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
        ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
        ofld_req4.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

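        /*
         * lport->e_d_tov is kept in milliseconds; the firmware timer
         * field appears to count in 20 ms units, hence the division.
         */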
        ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

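        /*
         * MAC addresses are handed to the firmware as three 16-bit
         * words (lo/mid/hi) with the bytes in reverse order.
         */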
        /* local mac */
        ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
        ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
        ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
        ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
        ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
        ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];

        /* fcf mac */
        ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];
        ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
        ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
        ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
        ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
        ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];

        ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
        ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

        ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
        ofld_req4.confq_pbl_base_addr_hi =
                                        (u32)((u64) tgt->confq_pbl_dma >> 32);

        kwqe_arr[0] = (struct kwqe *) &ofld_req1;
        kwqe_arr[1] = (struct kwqe *) &ofld_req2;
        kwqe_arr[2] = (struct kwqe *) &ofld_req3;
        kwqe_arr[3] = (struct kwqe *) &ofld_req4;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:               port structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt)
{
        struct kwqe *kwqe_arr[2];
        struct bnx2fc_hba *hba = port->priv;
        struct fcoe_kwqe_conn_enable_disable enbl_req;
        struct fc_lport *lport = port->lport;
        struct fc_rport *rport = tgt->rport;
        int num_kwqes = 1;
        int rc = 0;
        u32 port_id;

        memset(&enbl_req, 0x00,
               sizeof(struct fcoe_kwqe_conn_enable_disable));
        enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
        enbl_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        /* local mac */
        enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
        enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
        enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
        enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
        enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
        enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
        memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

        /* fcf mac */
        enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];
        enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
        enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
        enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
        enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
        enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];

        port_id = fc_host_port_id(lport->host);
        if (port_id != tgt->sid) {
                printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
                                "sid = 0x%x\n", port_id, tgt->sid);
                port_id = tgt->sid;
        }
        enbl_req.s_id[0] = (port_id & 0x000000FF);
        enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
        enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

        port_id = rport->port_id;
        enbl_req.d_id[0] = (port_id & 0x000000FF);
        enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
        enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
        enbl_req.vlan_tag = hba->vlan_id <<
                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
        enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
        enbl_req.vlan_flag = hba->vlan_enabled;
        enbl_req.context_id = tgt->context_id;
        enbl_req.conn_id = tgt->fcoe_conn_id;

        kwqe_arr[0] = (struct kwqe *) &enbl_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
        return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:               port structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
                                    struct bnx2fc_rport *tgt)
{
        struct bnx2fc_hba *hba = port->priv;
        struct fcoe_kwqe_conn_enable_disable disable_req;
        struct kwqe *kwqe_arr[2];
        struct fc_rport *rport = tgt->rport;
        int num_kwqes = 1;
        int rc = 0;
        u32 port_id;

        memset(&disable_req, 0x00,
               sizeof(struct fcoe_kwqe_conn_enable_disable));
        disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
        disable_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
        disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
        disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
        disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
        disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
        disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

        /* fcf mac */
        disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];
        disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
        disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
        disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
        disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
        disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];

        port_id = tgt->sid;
        disable_req.s_id[0] = (port_id & 0x000000FF);
        disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
        disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

        port_id = rport->port_id;
        disable_req.d_id[0] = (port_id & 0x000000FF);
        disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
        disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
        disable_req.context_id = tgt->context_id;
        disable_req.conn_id = tgt->fcoe_conn_id;
        disable_req.vlan_tag = hba->vlan_id <<
                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
        disable_req.vlan_tag |=
                        3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
        disable_req.vlan_flag = hba->vlan_enabled;

        kwqe_arr[0] = (struct kwqe *) &disable_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:                adapter structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
                                        struct bnx2fc_rport *tgt)
{
        struct fcoe_kwqe_conn_destroy destroy_req;
        struct kwqe *kwqe_arr[2];
        int num_kwqes = 1;
        int rc = 0;

        memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
        destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
        destroy_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        destroy_req.context_id = tgt->context_id;
        destroy_req.conn_id = tgt->fcoe_conn_id;

        kwqe_arr[0] = (struct kwqe *) &destroy_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
        struct bnx2fc_unsol_els *unsol_els;
        struct fc_lport *lport;
        struct fc_frame *fp;

        unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
        lport = unsol_els->lport;
        fp = unsol_els->fp;
        fc_exch_recv(lport, fp);
        kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
                                   unsigned char *buf,
                                   u32 frame_len, u16 l2_oxid)
{
        struct fcoe_port *port = tgt->port;
        struct fc_lport *lport = port->lport;
        struct bnx2fc_unsol_els *unsol_els;
        struct fc_frame_header *fh;
        struct fc_frame *fp;
        struct sk_buff *skb;
        u32 payload_len;
        u32 crc;
        u8 op;

        unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
        if (!unsol_els) {
                BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
                l2_oxid, frame_len);

        payload_len = frame_len - sizeof(struct fc_frame_header);

        fp = fc_frame_alloc(lport, payload_len);
        if (!fp) {
                printk(KERN_ERR PFX "fc_frame_alloc failure\n");
                kfree(unsol_els);
                return;
        }

        fh = (struct fc_frame_header *) fc_frame_header_get(fp);
        /* Copy FC Frame header and payload into the frame */
        memcpy(fh, buf, frame_len);

        if (l2_oxid != FC_XID_UNKNOWN)
                fh->fh_ox_id = htons(l2_oxid);

        skb = fp_skb(fp);

        if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
            (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

                if (fh->fh_type == FC_TYPE_ELS) {
                        op = fc_frame_payload_op(fp);
                        if ((op == ELS_TEST) || (op == ELS_ESTC) ||
                            (op == ELS_FAN) || (op == ELS_CSU)) {
                                /*
                                 * No need to reply for these
                                 * ELS requests
                                 */
                                printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
                                kfree_skb(skb);
                                kfree(unsol_els);
                                return;
                        }
                }
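                /*
                 * Recompute the FC CRC for the reconstructed frame and
                 * hand it to libfc from worker context rather than from
                 * this completion path.
                 */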
                crc = fcoe_fc_crc(fp);
                fc_frame_init(fp);
                fr_dev(fp) = lport;
                fr_sof(fp) = FC_SOF_I3;
                fr_eof(fp) = FC_EOF_T;
                fr_crc(fp) = cpu_to_le32(~crc);
                unsol_els->lport = lport;
                unsol_els->fp = fp;
                INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
                queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
        } else {
                BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
                kfree_skb(skb);
                kfree(unsol_els);
        }
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
        u8 num_rq;
        struct fcoe_err_report_entry *err_entry;
        unsigned char *rq_data;
        unsigned char *buf = NULL, *buf1;
        int i;
        u16 xid;
        u32 frame_len, len;
        struct bnx2fc_cmd *io_req = NULL;
        struct fcoe_task_ctx_entry *task, *task_page;
        struct bnx2fc_hba *hba = tgt->port->priv;
        int task_idx, index;
        int rc = 0;

        BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
        switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
        case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
                frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
                             FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

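                /* number of whole RQ buffers needed, rounding up */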
                num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

                spin_lock_bh(&tgt->tgt_lock);
                rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
                spin_unlock_bh(&tgt->tgt_lock);

                if (rq_data) {
                        buf = rq_data;
                } else {
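                        /*
                         * The frame wraps past the end of the RQ ring, so
                         * its entries are not contiguous; copy them one
                         * RQE at a time into a temporary buffer.
                         */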
                        buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
                                              GFP_ATOMIC);

                        if (!buf1) {
                                BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
                                break;
                        }

                        for (i = 0; i < num_rq; i++) {
                                spin_lock_bh(&tgt->tgt_lock);
                                rq_data = (unsigned char *)
                                           bnx2fc_get_next_rqe(tgt, 1);
                                spin_unlock_bh(&tgt->tgt_lock);
                                len = BNX2FC_RQ_BUF_SZ;
                                memcpy(buf1, rq_data, len);
                                buf1 += len;
                        }
                }
                bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
                                              FC_XID_UNKNOWN);

                if (buf != rq_data)
                        kfree(buf);
                spin_lock_bh(&tgt->tgt_lock);
                bnx2fc_return_rqe(tgt, num_rq);
                spin_unlock_bh(&tgt->tgt_lock);
                break;

        case FCOE_ERROR_DETECTION_CQE_TYPE:
                /*
                 * In case of error reporting CQE a single RQ entry
                 * is consumed.
                 */
                spin_lock_bh(&tgt->tgt_lock);
                num_rq = 1;
                err_entry = (struct fcoe_err_report_entry *)
                             bnx2fc_get_next_rqe(tgt, 1);
                xid = err_entry->fc_hdr.ox_id;
                BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
                        err_entry->data.err_warn_bitmap_hi,
                        err_entry->data.err_warn_bitmap_lo);
                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

                bnx2fc_return_rqe(tgt, 1);

                if (xid > BNX2FC_MAX_XID) {
                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
                                   xid);
                        spin_unlock_bh(&tgt->tgt_lock);
                        break;
                }

                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
                index = xid % BNX2FC_TASKS_PER_PAGE;
                task_page = (struct fcoe_task_ctx_entry *)
                                                hba->task_ctx[task_idx];
                task = &(task_page[index]);

                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
                if (!io_req) {
                        spin_unlock_bh(&tgt->tgt_lock);
                        break;
                }

                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
                        spin_unlock_bh(&tgt->tgt_lock);
                        break;
                }

                if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
                                       &io_req->req_flags)) {
                        BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
                                            "progress.. ignore unsol err\n");
                        spin_unlock_bh(&tgt->tgt_lock);
                        break;
                }

                /*
                 * If ABTS is already in progress, and FW error is
                 * received after that, do not cancel the timeout_work
                 * and let the error recovery continue by explicitly
                 * logging out the target, when the ABTS eventually
                 * times out.
                 */
                if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
                                      &io_req->req_flags)) {
                        /*
                         * Cancel the timeout_work, as we received IO
                         * completion with FW error.
                         */
                        if (cancel_delayed_work(&io_req->timeout_work))
                                kref_put(&io_req->refcount,
                                         bnx2fc_cmd_release); /* timer hold */

                        rc = bnx2fc_initiate_abts(io_req);
                        if (rc != SUCCESS) {
                                BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
                                        "failed. issue cleanup\n");
                                rc = bnx2fc_initiate_cleanup(io_req);
                                BUG_ON(rc);
                        }
                } else
                        printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
                                            "in ABTS processing\n", xid);
                spin_unlock_bh(&tgt->tgt_lock);
                break;

        case FCOE_WARNING_DETECTION_CQE_TYPE:
                /*
                 * In case of warning reporting CQE a single RQ entry
                 * is consumed.
                 */
                spin_lock_bh(&tgt->tgt_lock);
                num_rq = 1;
                err_entry = (struct fcoe_err_report_entry *)
                             bnx2fc_get_next_rqe(tgt, 1);
                xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
                BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
                        err_entry->data.err_warn_bitmap_hi,
                        err_entry->data.err_warn_bitmap_lo);
                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

                bnx2fc_return_rqe(tgt, 1);
                spin_unlock_bh(&tgt->tgt_lock);
                break;

        default:
                printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
                break;
        }
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
        struct fcoe_task_ctx_entry *task;
        struct fcoe_task_ctx_entry *task_page;
        struct fcoe_port *port = tgt->port;
        struct bnx2fc_hba *hba = port->priv;
        struct bnx2fc_cmd *io_req;
        int task_idx, index;
        u16 xid;
        u8 cmd_type;
        u8 rx_state = 0;
        u8 num_rq;

        spin_lock_bh(&tgt->tgt_lock);
        xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
        if (xid >= BNX2FC_MAX_TASKS) {
                printk(KERN_ALERT PFX "ERROR: xid out of range\n");
                spin_unlock_bh(&tgt->tgt_lock);
                return;
        }
        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
        index = xid % BNX2FC_TASKS_PER_PAGE;
        task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
        task = &(task_page[index]);

        num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

        io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

        if (io_req == NULL) {
                printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
                spin_unlock_bh(&tgt->tgt_lock);
                return;
        }

        cmd_type = io_req->cmd_type;

        rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

        /* Process the IO completion based on the command type */
        switch (cmd_type) {
        case BNX2FC_SCSI_CMD:
                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
                        bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
                        spin_unlock_bh(&tgt->tgt_lock);
                        return;
                }

                if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
                        bnx2fc_process_abts_compl(io_req, task, num_rq);
                else if (rx_state ==
                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
                else
                        printk(KERN_ERR PFX "Invalid rx state - %d\n",
                                rx_state);
                break;

        case BNX2FC_TASK_MGMT_CMD:
                BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
                bnx2fc_process_tm_compl(io_req, task, num_rq);
                break;

        case BNX2FC_ABTS:
                /*
                 * ABTS request received by firmware. ABTS response
                 * will be delivered to the task belonging to the IO
                 * that was aborted
                 */
                BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
                break;

        case BNX2FC_ELS:
                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
                        bnx2fc_process_els_compl(io_req, task, num_rq);
                else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
                        bnx2fc_process_abts_compl(io_req, task, num_rq);
                else if (rx_state ==
                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
                else
                        printk(KERN_ERR PFX "Invalid rx state = %d\n",
                                rx_state);
                break;

        case BNX2FC_CLEANUP:
                BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
                break;

        default:
                printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
                break;
        }
        spin_unlock_bh(&tgt->tgt_lock);
}

void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
        struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
        u32 msg;

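        /* order CQ consumer updates before the doorbell write */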
        wmb();
        rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
                        FCOE_CQE_TOGGLE_BIT_SHIFT);
        msg = *((u32 *)rx_db);
        writel(cpu_to_le32(msg), tgt->ctx_base);
        mmiowb();
}

struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
        struct bnx2fc_work *work;

        work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
        if (!work)
                return NULL;

        INIT_LIST_HEAD(&work->list);
        work->tgt = tgt;
        work->wqe = wqe;
        return work;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
        struct fcoe_cqe *cq;
        u32 cq_cons;
        struct fcoe_cqe *cqe;
        u32 num_free_sqes = 0;
        u16 wqe;

        /*
         * cq_lock is a low contention lock used to protect
         * the CQ data structure from being freed up during
         * the upload operation
         */
        spin_lock_bh(&tgt->cq_lock);

        if (!tgt->cq) {
                printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
                spin_unlock_bh(&tgt->cq_lock);
                return 0;
        }
        cq = tgt->cq;
        cq_cons = tgt->cq_cons_idx;
        cqe = &cq[cq_cons];

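        /*
         * Consume CQEs while their toggle bit matches the expected value;
         * the bit flips each time the ring wraps, so stale entries from
         * the previous pass are never re-processed.
         */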
        while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
               (tgt->cq_curr_toggle_bit <<
               FCOE_CQE_TOGGLE_BIT_SHIFT)) {

                /* new entry on the cq */
                if (wqe & FCOE_CQE_CQE_TYPE) {
                        /* Unsolicited event notification */
                        bnx2fc_process_unsol_compl(tgt, wqe);
                } else {
                        /* Pending work request completion */
                        struct bnx2fc_work *work = NULL;
                        struct bnx2fc_percpu_s *fps = NULL;
                        unsigned int cpu = wqe % num_possible_cpus();

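                        /* spread completion work across the per-CPU threads */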
                        fps = &per_cpu(bnx2fc_percpu, cpu);
                        spin_lock_bh(&fps->fp_work_lock);
                        if (unlikely(!fps->iothread))
                                goto unlock;

                        work = bnx2fc_alloc_work(tgt, wqe);
                        if (work)
                                list_add_tail(&work->list,
                                              &fps->work_list);
unlock:
                        spin_unlock_bh(&fps->fp_work_lock);

                        /* wake the thread, or process inline on failure */
                        if (fps->iothread && work)
                                wake_up_process(fps->iothread);
                        else
                                bnx2fc_process_cq_compl(tgt, wqe);
                }
                cqe++;
                tgt->cq_cons_idx++;
                num_free_sqes++;

                if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
                        tgt->cq_cons_idx = 0;
                        cqe = cq;
                        tgt->cq_curr_toggle_bit =
                                1 - tgt->cq_curr_toggle_bit;
                }
        }
        bnx2fc_arm_cq(tgt);
        atomic_add(num_free_sqes, &tgt->free_sqes);
        spin_unlock_bh(&tgt->cq_lock);
        return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:                adapter structure pointer
 * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *new_cqe_kcqe)
{
        u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
        struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

        if (!tgt) {
                printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
                return;
        }

        bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:        adapter structure pointer
 * @ofld_kcqe:  connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *ofld_kcqe)
{
        struct bnx2fc_rport             *tgt;
        struct fcoe_port                *port;
        u32                             conn_id;
        u32                             context_id;
        int                             rc;

        conn_id = ofld_kcqe->fcoe_conn_id;
        context_id = ofld_kcqe->fcoe_conn_context_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
                return;
        }
        BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
                ofld_kcqe->fcoe_conn_context_id);
        port = tgt->port;
        if (hba != tgt->port->priv) {
                printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
                goto ofld_cmpl_err;
        }
        /*
         * cnic has allocated a context_id for this session; use this
         * while enabling the session.
         */
        tgt->context_id = context_id;
        if (ofld_kcqe->completion_status) {
                if (ofld_kcqe->completion_status ==
                                FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
                        printk(KERN_ERR PFX "unable to allocate FCoE context "
                                "resources\n");
                        set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
                }
                goto ofld_cmpl_err;
        } else {
                /* now enable the session */
                rc = bnx2fc_send_session_enable_req(port, tgt);
                if (rc) {
                        printk(KERN_ALERT PFX "enable session failed\n");
                        goto ofld_cmpl_err;
                }
        }
        return;
ofld_cmpl_err:
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
        wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:        adapter structure pointer
 * @ofld_kcqe:  connection offload kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
                                                struct fcoe_kcqe *ofld_kcqe)
{
        struct bnx2fc_rport             *tgt;
        u32                             conn_id;
        u32                             context_id;

        context_id = ofld_kcqe->fcoe_conn_context_id;
        conn_id = ofld_kcqe->fcoe_conn_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
                ofld_kcqe->fcoe_conn_context_id);

        /*
         * context_id should be the same for this target during offload
         * and enable
         */
        if (tgt->context_id != context_id) {
                printk(KERN_ALERT PFX "context id mis-match\n");
                return;
        }
        if (hba != tgt->port->priv) {
                printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
                goto enbl_cmpl_err;
        }
        if (ofld_kcqe->completion_status) {
                goto enbl_cmpl_err;
        } else {
                /* enable successful - rport ready for issuing IOs */
                set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
                set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
                wake_up_interruptible(&tgt->ofld_wait);
        }
        return;

enbl_cmpl_err:
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
        wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *disable_kcqe)
{
        struct bnx2fc_rport             *tgt;
        u32                             conn_id;

        conn_id = disable_kcqe->fcoe_conn_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);

        if (disable_kcqe->completion_status) {
                printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
                        disable_kcqe->completion_status);
                return;
        } else {
                /* disable successful */
                BNX2FC_TGT_DBG(tgt, "disable successful\n");
                clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
                set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
                wake_up_interruptible(&tgt->upld_wait);
        }
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *destroy_kcqe)
{
        struct bnx2fc_rport             *tgt;
        u32                             conn_id;

        conn_id = destroy_kcqe->fcoe_conn_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

        if (destroy_kcqe->completion_status) {
                printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
                        destroy_kcqe->completion_status);
                return;
        } else {
                /* destroy successful */
                BNX2FC_TGT_DBG(tgt, "upload successful\n");
                clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
                wake_up_interruptible(&tgt->upld_wait);
        }
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
        switch (err_code) {
        case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
                printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
                break;

        case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
                printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
                break;

        case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
                printk(KERN_ERR PFX "init_failure due to NIC error\n");
                break;
        case FCOE_KCQE_COMPLETION_STATUS_ERROR:
                printk(KERN_ERR PFX "init failure due to compl status err\n");
                break;
        case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
                printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
                break;
        default:
                printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
        }
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:    adapter structure pointer
 * @kcq:        kcqe array pointer
 * @num_cqe:    Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
                                        u32 num_cqe)
{
        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
        int i = 0;
        struct fcoe_kcqe *kcqe = NULL;

        while (i < num_cqe) {
                kcqe = (struct fcoe_kcqe *) kcq[i++];

                switch (kcqe->op_code) {
                case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
                        bnx2fc_fastpath_notification(hba, kcqe);
                        break;

                case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
                        bnx2fc_process_ofld_cmpl(hba, kcqe);
                        break;

                case FCOE_KCQE_OPCODE_ENABLE_CONN:
                        bnx2fc_process_enable_conn_cmpl(hba, kcqe);
                        break;

                case FCOE_KCQE_OPCODE_INIT_FUNC:
                        if (kcqe->completion_status !=
                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
                                bnx2fc_init_failure(hba,
                                                kcqe->completion_status);
                        } else {
                                set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
                                bnx2fc_get_link_state(hba);
                                printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
                                        (u8)hba->pcidev->bus->number);
                        }
                        break;

                case FCOE_KCQE_OPCODE_DESTROY_FUNC:
                        if (kcqe->completion_status !=
                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
                                printk(KERN_ERR PFX "DESTROY failed\n");
                        } else {
                                printk(KERN_ERR PFX "DESTROY success\n");
                        }
                        hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
                        wake_up_interruptible(&hba->destroy_wait);
                        break;

                case FCOE_KCQE_OPCODE_DISABLE_CONN:
                        bnx2fc_process_conn_disable_cmpl(hba, kcqe);
                        break;

                case FCOE_KCQE_OPCODE_DESTROY_CONN:
                        bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
                        break;

                case FCOE_KCQE_OPCODE_STAT_FUNC:
                        if (kcqe->completion_status !=
                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
                                printk(KERN_ERR PFX "STAT failed\n");
                        complete(&hba->stat_req_done);
                        break;

                case FCOE_KCQE_OPCODE_FCOE_ERROR:
                        /* fall thru */
                default:
                        printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
                                                                kcqe->op_code);
                }
        }
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
        struct fcoe_sqe *sqe;

        sqe = &tgt->sq[tgt->sq_prod_idx];

        /* Fill SQ WQE */
        sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
        sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

        /* Advance SQ Prod Idx */
        if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
                tgt->sq_prod_idx = 0;
                tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
        }
}

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
        struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
        u32 msg;

        wmb();
        sq_db->prod = tgt->sq_prod_idx |
                                (tgt->sq_curr_toggle_bit << 15);
        msg = *((u32 *)sq_db);
        writel(cpu_to_le32(msg), tgt->ctx_base);
        mmiowb();
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
        u32 context_id = tgt->context_id;
        struct fcoe_port *port = tgt->port;
        u32 reg_off;
        resource_size_t reg_base;
        struct bnx2fc_hba *hba = port->priv;

        reg_base = pci_resource_start(hba->pcidev,
                                        BNX2X_DOORBELL_PCI_BAR);
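        /*
         * Each connection owns a page-sized slot within the doorbell BAR,
         * indexed by the low 17 bits of its context id.
         */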
        reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
                        (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
        tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
        if (!tgt->ctx_base)
                return -ENOMEM;
        return 0;
}

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
        char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

        if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
                return NULL;

        tgt->rq_cons_idx += num_items;

        if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
                tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

        return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
        /* return the rq buffer */
        u32 next_prod_idx = tgt->rq_prod_idx + num_items;

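        /*
         * The low 15 bits index the ring; when they reach the ring size,
         * adding (0x8000 - BNX2FC_RQ_WQES_MAX) wraps them back to zero
         * and flips bit 15, which serves as the wrap sequence bit
         * (mirroring the 0x8000 initial value in the offload request).
         */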
        if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
                /* Wrap around RQ */
                next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
        }
        tgt->rq_prod_idx = next_prod_idx;
        tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
                              struct fcoe_task_ctx_entry *task,
                              u16 orig_xid)
{
        u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
        struct bnx2fc_rport *tgt = io_req->tgt;
        u32 context_id = tgt->context_id;

        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

        /* Tx Write Rx Read */
        /* init flags */
        task->txwr_rxrd.const_ctx.init_flags = task_type <<
                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
        task->txwr_rxrd.const_ctx.init_flags |=
                                FCOE_TASK_DEV_TYPE_DISK <<
                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

        /* Tx flags */
        task->txwr_rxrd.const_ctx.tx_flags =
                                FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

        /* Rx Read Tx Write */
        task->rxwr_txrd.const_ctx.init_flags = context_id <<
                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}
1357
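/*
 * bnx2fc_init_mp_task - build a middle path (ELS/TM) or ABTS task
 *
 * Middle path requests carry their FC header inside the task context
 * and reference single-entry SGLs for the request and response buffers
 * held in the io_req's mp_req; no SGL is programmed for ABTS tasks.
 */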
1358 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1359                                 struct fcoe_task_ctx_entry *task)
1360 {
1361         struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1362         struct bnx2fc_rport *tgt = io_req->tgt;
1363         struct fc_frame_header *fc_hdr;
1364         struct fcoe_ext_mul_sges_ctx *sgl;
1365         u8 task_type = 0;
1366         u64 *hdr;
1367         u64 temp_hdr[3];
1368         u32 context_id;
1369
1371         /* Obtain task_type */
1372         if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1373             (io_req->cmd_type == BNX2FC_ELS)) {
1374                 task_type = FCOE_TASK_TYPE_MIDPATH;
1375         } else if (io_req->cmd_type == BNX2FC_ABTS) {
1376                 task_type = FCOE_TASK_TYPE_ABTS;
1377         }
1378
1379         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1380
1381         /* Setup the task from io_req for easy reference */
1382         io_req->task = task;
1383
1384         BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1385                 io_req->cmd_type, task_type);
1386
1387         /* Tx only */
1388         if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1389             (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1390                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1391                                 (u32)mp_req->mp_req_bd_dma;
1392                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1393                                 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1394                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1395         }
1396
1397         /* Tx Write Rx Read */
1398         /* init flags */
1399         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1400                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1401         task->txwr_rxrd.const_ctx.init_flags |=
1402                                 FCOE_TASK_DEV_TYPE_DISK <<
1403                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1404         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1405                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1406
1407         /* tx flags */
1408         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1409                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1410
1411         /* Rx Write Tx Read */
1412         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1413
1414         /* rx flags */
1415         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1416                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1417
1418         context_id = tgt->context_id;
1419         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1420                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1421
1422         fc_hdr = &(mp_req->req_fc_hdr);
1423         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1424                 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
                fc_hdr->fh_rx_id = cpu_to_be16(0xffff);
1426                 task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1427         } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1428                 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1429         }
1430
1431         /* Fill FC Header into middle path buffer */
1432         hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1433         memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1434         hdr[0] = cpu_to_be64(temp_hdr[0]);
1435         hdr[1] = cpu_to_be64(temp_hdr[1]);
1436         hdr[2] = cpu_to_be64(temp_hdr[2]);
1437
1438         /* Rx Only */
1439         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1440                 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1441
1442                 sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1443                 sgl->mul_sgl.cur_sge_addr.hi =
1444                                 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1445                 sgl->mul_sgl.sgl_size = 1;
1446         }
1447 }
1448
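/*
 * bnx2fc_init_task - build the task context for a regular SCSI command
 *
 * The data direction of the scsi_cmnd selects a READ or WRITE task
 * type, and the FCP_CMND IU is copied into the context as big-endian
 * 64-bit words.
 */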
1449 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1450                              struct fcoe_task_ctx_entry *task)
1451 {
1452         u8 task_type;
1453         struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1454         struct io_bdt *bd_tbl = io_req->bd_tbl;
1455         struct bnx2fc_rport *tgt = io_req->tgt;
1456         struct fcoe_cached_sge_ctx *cached_sge;
1457         struct fcoe_ext_mul_sges_ctx *sgl;
1458         u64 *fcp_cmnd;
1459         u64 tmp_fcp_cmnd[4];
1460         u32 context_id;
1461         int cnt, i;
1462         int bd_count;
1463
1464         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1465
1466         /* Setup the task from io_req for easy reference */
1467         io_req->task = task;
1468
1469         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1470                 task_type = FCOE_TASK_TYPE_WRITE;
1471         else
1472                 task_type = FCOE_TASK_TYPE_READ;
1473
1474         /* Tx only */
1475         if (task_type == FCOE_TASK_TYPE_WRITE) {
1476                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1477                                 (u32)bd_tbl->bd_tbl_dma;
1478                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1479                                 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1480                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1481                                 bd_tbl->bd_valid;
1482         }
1483
1484         /*Tx Write Rx Read */
1485         /* Init state to NORMAL */
1486         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1487                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1488         task->txwr_rxrd.const_ctx.init_flags |=
1489                                 FCOE_TASK_DEV_TYPE_DISK <<
1490                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1491         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1492                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1493         /* tx flags */
1494         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1495                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1496
1497         /* Set initial seq counter */
1498         task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1499
1500         /* Fill FCP_CMND IU */
1501         fcp_cmnd = (u64 *)
1502                     task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1503         bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1504
1505         /* swap fcp_cmnd */
1506         cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1507
1508         for (i = 0; i < cnt; i++) {
1509                 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1510                 fcp_cmnd++;
1511         }
1512
1513         /* Rx Write Tx Read */
1514         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1515
1516         context_id = tgt->context_id;
1517         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1518                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1519
1520         /* rx flags */
1521         /* Set state to "waiting for the first packet" */
1522         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1523                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1524
1525         task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1526
1527         /* Rx Only */
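        /*
         * Reads covered by one or two BDs are programmed straight into
         * the cached SGE and flagged as such, letting the firmware skip
         * the SGL fetch; larger I/Os fall back to the multiple-SGE
         * list.
         */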
1528         cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1529         sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1530         bd_count = bd_tbl->bd_valid;
1531         if (task_type == FCOE_TASK_TYPE_READ) {
1532                 if (bd_count == 1) {
1534                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1535
1536                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1537                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1538                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1539                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1540                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1541                 } else if (bd_count == 2) {
1542                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1543
1544                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1545                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1546                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1547
1548                         fcoe_bd_tbl++;
1549                         cached_sge->second_buf_addr.lo =
1550                                                  fcoe_bd_tbl->buf_addr_lo;
1551                         cached_sge->second_buf_addr.hi =
1552                                                 fcoe_bd_tbl->buf_addr_hi;
1553                         cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1554                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1555                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1556                 } else {
1558                         sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1559                         sgl->mul_sgl.cur_sge_addr.hi =
1560                                         (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1561                         sgl->mul_sgl.sgl_size = bd_count;
1562                 }
1563         }
1564 }
1565
1566 /**
1567  * bnx2fc_setup_task_ctx - allocate and map task context
1568  *
1569  * @hba:        pointer to adapter structure
1570  *
 * Allocate memory for the task context pages and the associated BD
 * table through which the firmware locates them.
 */
1575 int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1576 {
1577         int rc = 0;
1578         struct regpair *task_ctx_bdt;
1579         dma_addr_t addr;
1580         int i;
1581
        /*
         * Allocate the task context BD table. A single page of BD
         * entries can map 256 task context pages, and each page holds
         * 32 task context entries, so one BD table page covers up to
         * 8192 task context entries.
         */
1588         hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1589                                                   PAGE_SIZE,
1590                                                   &hba->task_ctx_bd_dma,
1591                                                   GFP_KERNEL);
1592         if (!hba->task_ctx_bd_tbl) {
1593                 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
                rc = -ENOMEM;
1595                 goto out;
1596         }
1597         memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1598
1599         /*
1600          * Allocate task_ctx which is an array of pointers pointing to
1601          * a page containing 32 task contexts
1602          */
1603         hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
1604                                  GFP_KERNEL);
1605         if (!hba->task_ctx) {
1606                 printk(KERN_ERR PFX "unable to allocate task context array\n");
                rc = -ENOMEM;
1608                 goto out1;
1609         }
1610
1611         /*
1612          * Allocate task_ctx_dma which is an array of dma addresses
1613          */
1614         hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
1615                                         sizeof(dma_addr_t)), GFP_KERNEL);
1616         if (!hba->task_ctx_dma) {
1617                 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
                rc = -ENOMEM;
1619                 goto out2;
1620         }
1621
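        /*
         * Fill the BD table: each regpair entry holds the DMA address
         * of one task context page as little-endian lo/hi 32-bit words.
         */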
1622         task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1623         for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1625                 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1626                                                       PAGE_SIZE,
1627                                                       &hba->task_ctx_dma[i],
1628                                                       GFP_KERNEL);
1629                 if (!hba->task_ctx[i]) {
1630                         printk(KERN_ERR PFX "unable to alloc task context\n");
                        rc = -ENOMEM;
1632                         goto out3;
1633                 }
1634                 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1635                 addr = (u64)hba->task_ctx_dma[i];
1636                 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1637                 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1638                 task_ctx_bdt++;
1639         }
1640         return 0;
1641
1642 out3:
1643         for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1644                 if (hba->task_ctx[i]) {
1646                         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1647                                 hba->task_ctx[i], hba->task_ctx_dma[i]);
1648                         hba->task_ctx[i] = NULL;
1649                 }
1650         }
1651
1652         kfree(hba->task_ctx_dma);
1653         hba->task_ctx_dma = NULL;
1654 out2:
1655         kfree(hba->task_ctx);
1656         hba->task_ctx = NULL;
1657 out1:
1658         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1659                         hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1660         hba->task_ctx_bd_tbl = NULL;
1661 out:
1662         return rc;
1663 }
1664
1665 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1666 {
1667         int i;
1668
1669         if (hba->task_ctx_bd_tbl) {
1670                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1671                                     hba->task_ctx_bd_tbl,
1672                                     hba->task_ctx_bd_dma);
1673                 hba->task_ctx_bd_tbl = NULL;
1674         }
1675
1676         if (hba->task_ctx) {
1677                 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1678                         if (hba->task_ctx[i]) {
1679                                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1680                                                     hba->task_ctx[i],
1681                                                     hba->task_ctx_dma[i]);
1682                                 hba->task_ctx[i] = NULL;
1683                         }
1684                 }
1685                 kfree(hba->task_ctx);
1686                 hba->task_ctx = NULL;
1687         }
1688
1689         kfree(hba->task_ctx_dma);
1690         hba->task_ctx_dma = NULL;
1691 }
1692
static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
        int i;
        int segment_count;
        u32 *pbl;

        /*
         * The segment DMA addresses are read back from the PBL, so the
         * segments can only be freed if both the segment array and the
         * PBL were successfully allocated.
         */
        if (hba->hash_tbl_segments && hba->hash_tbl_pbl) {
                segment_count = hba->hash_tbl_segment_count;

                pbl = hba->hash_tbl_pbl;
                for (i = 0; i < segment_count; ++i) {
                        dma_addr_t dma_address;

                        dma_address = le32_to_cpu(*pbl);
                        ++pbl;
                        dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
                        ++pbl;
                        dma_free_coherent(&hba->pcidev->dev,
                                          BNX2FC_HASH_TBL_CHUNK_SIZE,
                                          hba->hash_tbl_segments[i],
                                          dma_address);
                }
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
        }

        if (hba->hash_tbl_pbl) {
                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                    hba->hash_tbl_pbl,
                                    hba->hash_tbl_pbl_dma);
                hba->hash_tbl_pbl = NULL;
        }
}
1726
1727 static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
1728 {
1729         int i;
1730         int hash_table_size;
1731         int segment_count;
1732         int segment_array_size;
1733         int dma_segment_array_size;
1734         dma_addr_t *dma_segment_array;
1735         u32 *pbl;
1736
1737         hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1738                 sizeof(struct fcoe_hash_table_entry);
1739
1740         segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
1741         segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
1742         hba->hash_tbl_segment_count = segment_count;
1743
1744         segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
1745         hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
1746         if (!hba->hash_tbl_segments) {
1747                 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
1748                 return -ENOMEM;
1749         }
        dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
        dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
        if (!dma_segment_array) {
                printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
                return -ENOMEM;
        }
1756
1757         for (i = 0; i < segment_count; ++i) {
1758                 hba->hash_tbl_segments[i] =
1759                         dma_alloc_coherent(&hba->pcidev->dev,
1760                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
1761                                            &dma_segment_array[i],
1762                                            GFP_KERNEL);
                if (!hba->hash_tbl_segments[i]) {
                        printk(KERN_ERR PFX "hash segment alloc failed\n");
                        while (--i >= 0) {
                                dma_free_coherent(&hba->pcidev->dev,
                                                    BNX2FC_HASH_TBL_CHUNK_SIZE,
                                                    hba->hash_tbl_segments[i],
                                                    dma_segment_array[i]);
                                hba->hash_tbl_segments[i] = NULL;
                        }
                        kfree(hba->hash_tbl_segments);
                        hba->hash_tbl_segments = NULL;
                        kfree(dma_segment_array);
                        return -ENOMEM;
                }
1775                 memset(hba->hash_tbl_segments[i], 0,
1776                        BNX2FC_HASH_TBL_CHUNK_SIZE);
1777         }
1778
        hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
                                               PAGE_SIZE,
                                               &hba->hash_tbl_pbl_dma,
                                               GFP_KERNEL);
        if (!hba->hash_tbl_pbl) {
                printk(KERN_ERR PFX "hash table pbl alloc failed\n");
                while (--i >= 0)
                        dma_free_coherent(&hba->pcidev->dev,
                                          BNX2FC_HASH_TBL_CHUNK_SIZE,
                                          hba->hash_tbl_segments[i],
                                          dma_segment_array[i]);
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
                kfree(dma_segment_array);
                return -ENOMEM;
        }
1788         memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
1789
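        /*
         * Build the page base list (PBL): each segment's DMA address is
         * stored as a little-endian lo/hi pair of 32-bit words, the same
         * layout bnx2fc_free_hash_table() reads back when freeing.
         */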
1790         pbl = hba->hash_tbl_pbl;
1791         for (i = 0; i < segment_count; ++i) {
1792                 u64 paddr = dma_segment_array[i];
1793                 *pbl = cpu_to_le32((u32) paddr);
1794                 ++pbl;
1795                 *pbl = cpu_to_le32((u32) (paddr >> 32));
1796                 ++pbl;
1797         }
1809         kfree(dma_segment_array);
1810         return 0;
1811 }
1812
1813 /**
 * bnx2fc_setup_fw_resc - allocate and map the hash tables, dummy and stats buffers
1815  *
1816  * @hba:        Pointer to adapter structure
1817  *
1818  */
1819 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
1820 {
1821         u64 addr;
1822         u32 mem_size;
1823         int i;
1824
1825         if (bnx2fc_allocate_hash_table(hba))
1826                 return -ENOMEM;
1827
1828         mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1829         hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1830                                                   &hba->t2_hash_tbl_ptr_dma,
1831                                                   GFP_KERNEL);
1832         if (!hba->t2_hash_tbl_ptr) {
1833                 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
1834                 bnx2fc_free_fw_resc(hba);
1835                 return -ENOMEM;
1836         }
1837         memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
1838
1839         mem_size = BNX2FC_NUM_MAX_SESS *
1840                                 sizeof(struct fcoe_t2_hash_table_entry);
1841         hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1842                                               &hba->t2_hash_tbl_dma,
1843                                               GFP_KERNEL);
1844         if (!hba->t2_hash_tbl) {
1845                 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
1846                 bnx2fc_free_fw_resc(hba);
1847                 return -ENOMEM;
1848         }
1849         memset(hba->t2_hash_tbl, 0x00, mem_size);
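        /*
         * Pre-link the T2 hash entries: each entry's 'next' regpair
         * holds the DMA address of the entry after it, presumably the
         * collision chain walked by the firmware. The last entry is
         * left pointing one entry past the end of the table.
         */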
1850         for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
                /* use u64 so the DMA address is not truncated on
                 * 32-bit hosts with 64-bit DMA addressing
                 */
                addr = (u64) hba->t2_hash_tbl_dma +
                         ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
1853                 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
1854                 hba->t2_hash_tbl[i].next.hi = addr >> 32;
1855         }
1856
1857         hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1858                                                PAGE_SIZE, &hba->dummy_buf_dma,
1859                                                GFP_KERNEL);
1860         if (!hba->dummy_buffer) {
1861                 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
1862                 bnx2fc_free_fw_resc(hba);
1863                 return -ENOMEM;
1864         }
1865
1866         hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1867                                                PAGE_SIZE,
1868                                                &hba->stats_buf_dma,
1869                                                GFP_KERNEL);
1870         if (!hba->stats_buffer) {
1871                 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
1872                 bnx2fc_free_fw_resc(hba);
1873                 return -ENOMEM;
1874         }
1875         memset(hba->stats_buffer, 0x00, PAGE_SIZE);
1876
1877         return 0;
1878 }
1879
1880 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
1881 {
1882         u32 mem_size;
1883
1884         if (hba->stats_buffer) {
1885                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1886                                   hba->stats_buffer, hba->stats_buf_dma);
1887                 hba->stats_buffer = NULL;
1888         }
1889
1890         if (hba->dummy_buffer) {
1891                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1892                                   hba->dummy_buffer, hba->dummy_buf_dma);
1893                 hba->dummy_buffer = NULL;
1894         }
1895
1896         if (hba->t2_hash_tbl_ptr) {
1897                 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1898                 dma_free_coherent(&hba->pcidev->dev, mem_size,
1899                                     hba->t2_hash_tbl_ptr,
1900                                     hba->t2_hash_tbl_ptr_dma);
1901                 hba->t2_hash_tbl_ptr = NULL;
1902         }
1903
1904         if (hba->t2_hash_tbl) {
1905                 mem_size = BNX2FC_NUM_MAX_SESS *
1906                             sizeof(struct fcoe_t2_hash_table_entry);
1907                 dma_free_coherent(&hba->pcidev->dev, mem_size,
1908                                     hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
1909                 hba->t2_hash_tbl = NULL;
1910         }
1911         bnx2fc_free_hash_table(hba);
1912 }