/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"
DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *conn_destroy);
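
/*
 * Request/response model used throughout this file: the driver builds
 * kernel work queue entries (KWQEs) and hands them to the hardware via
 * the cnic device's submit_kwqes() hook; the 57712 firmware answers
 * asynchronously with kernel completion queue entries (KCQEs), which
 * arrive through bnx2fc_indicate_kcqe() below.
 */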
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
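	/*
	 * 64-bit DMA addresses are handed to the firmware as lo/hi 32-bit
	 * halves; e.g. 0x0000001234567890 becomes lo = 0x34567890 and
	 * hi = 0x00000012. The same convention is used for every DMA
	 * address placed in the KWQEs below.
	 */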
	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba:	adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 * with the firmware.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
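	/*
	 * Judging by the name, BNX2FC_NUM_MAX_SESS_LOG is log2 of the
	 * maximum session count (BNX2FC_NUM_MAX_SESS); the firmware
	 * presumably sizes its per-session resources from this power
	 * of two.
	 */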

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	fcoe_init3.perf_config = 1;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_hba *hba = port->priv;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;
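	/*
	 * Bit 15 of the RQ producer index serves as a wrap marker: see
	 * bnx2fc_return_rqe(), which masks the index with 0x7fff and adds
	 * 0x8000 - BNX2FC_RQ_WQES_MAX on wrap, so bit 15 effectively
	 * toggles on every lap around the ring. The initial value of
	 * 0x8000 seeds that toggle rather than encoding a real index.
	 */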

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
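	/*
	 * FC port IDs are 24-bit addresses; they are packed into the
	 * three-byte s_id/d_id arrays least-significant octet first
	 * (s_id[0] = bits 7:0, s_id[2] = bits 23:16).
	 */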

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/

	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/* vlan flag */
	ofld_req3.flags |= (hba->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
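	/*
	 * lport->e_d_tov is in milliseconds; dividing by 20 suggests the
	 * firmware error-detect timer ticks at a 20ms granularity, though
	 * that resolution is an assumption based solely on this
	 * conversion.
	 */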

	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
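	/*
	 * The 6-byte MAC addresses are handed to the chip in byte-reversed
	 * order: octet 5 (the last byte on the wire) lands in lo[0] and
	 * octet 0 in hi[1]. The enable/disable KWQEs below use the same
	 * layout.
	 */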

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
				(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];
							/* fcf mac */
	enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = hba->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];
							/* fcf mac */
	disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = hba->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);
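	/*
	 * frame_len covers the raw frame as received, so the payload is
	 * whatever remains after the 24-byte FC frame header.
	 */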

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				fc_frame_free(fp);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		fc_frame_free(fp);
		kfree(unsol_els);
	}
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_hba *hba = tgt->port->priv;
	int task_idx, index;
	int rc = 0;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
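		/*
		 * num_rq is the frame length rounded up to whole RQ
		 * buffers of BNX2FC_RQ_BUF_SZ bytes each; an unsolicited
		 * frame may span several RQ entries.
		 */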

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					     GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					  bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			    bnx2fc_get_next_rqe(tgt, 1);
		xid = be16_to_cpu(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				       xid);
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
					hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req) {
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * completes.
		 */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &io_req->req_flags)) {
			/*
			 * Cancel the timeout_work, as we received IO
			 * completion with FW error.
			 */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */

			rc = bnx2fc_initiate_abts(io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
					"failed. issue cleanup\n");
				rc = bnx2fc_initiate_cleanup(io_req);
				BUG_ON(rc);
			}
		} else
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			    bnx2fc_get_next_rqe(tgt, 1);
		xid = be16_to_cpu(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ALERT PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted.
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
					FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
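	/*
	 * The rx doorbell structure is 32 bits wide, so it is copied out
	 * as a raw u32 and posted with a single register write. Writing
	 * the consumer index plus the current toggle bit tells the chip
	 * how far the driver has consumed the CQ and re-arms it for the
	 * next completion event.
	 */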
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();
}

struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
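		/*
		 * A CQE is valid only while its toggle bit matches the
		 * driver's current toggle; the firmware flips the bit it
		 * writes on each pass around the ring, so a stale entry
		 * from the previous lap fails this test and ends the loop.
		 */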
		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			/* Pending work request completion */
			struct bnx2fc_work *work = NULL;
			struct bnx2fc_percpu_s *fps = NULL;
			unsigned int cpu = wqe % num_possible_cpus();

			fps = &per_cpu(bnx2fc_percpu, cpu);
			spin_lock_bh(&fps->fp_work_lock);
			if (unlikely(!fps->iothread))
				goto unlock;

			work = bnx2fc_alloc_work(tgt, wqe);
			if (work)
				list_add_tail(&work->list,
					      &fps->work_list);
unlock:
			spin_unlock_bh(&fps->fp_work_lock);

			/* Pending work request completion */
			if (fps->iothread && work)
				wake_up_process(fps->iothread);
			else
				bnx2fc_process_cq_compl(tgt, wqe);
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_free_sqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	bnx2fc_arm_cq(tgt);
	atomic_add(num_free_sqes, &tgt->free_sqes);
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct fcoe_port *port;
	u32 conn_id;
	u32 context_id;
	int rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {
		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ALERT PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ALERT PFX "context id mis-match\n");
		return;
	}
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status) {
		goto enbl_cmpl_err;
	} else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		return;
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;

	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:	adapter structure pointer
 * @kcq:	kcqe pointer
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
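	/*
	 * Each SQ WQE carries the task XID plus the producer-side toggle
	 * bit; mirroring the CQ convention, the toggle lets the firmware
	 * tell fresh entries from ones left over from the previous lap
	 * around the ring.
	 */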

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
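	/*
	 * As with the rx doorbell, the producer doorbell is exactly 32
	 * bits, so it is copied out as a raw u32 and posted with a single
	 * write to this connection's mapped doorbell page (tgt->ctx_base,
	 * set up in bnx2fc_map_doorbell()).
	 */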
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_hba *hba = port->priv;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
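	/*
	 * Each connection context owns its own doorbell page within the
	 * doorbell BAR; the offset is the page size times the (17-bit)
	 * context id, plus the DPM trigger offset within that page.
	 */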
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}
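
/*
 * RQ accessors: bnx2fc_get_next_rqe() hands out consumer-side buffers
 * and bnx2fc_return_rqe() gives them back by advancing the producer
 * index that is mirrored into the connection doorbell memory.
 */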
char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);
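	/*
	 * The 24-byte FC header is staged through temp_hdr (three 64-bit
	 * words) and run through cpu_to_be64() so the header image lands
	 * in the task context in the byte order the firmware expects.
	 */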

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}

void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)bd_tbl->bd_tbl_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_tbl->bd_valid;
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Set initial seq counter */
	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}
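	/*
	 * Like the MP FC header above, the FCP_CMND IU is built in a
	 * temporary buffer and copied into the task context as 64-bit
	 * words run through cpu_to_be64(), putting it in firmware byte
	 * order.
	 */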

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	/* Set state to "waiting for the first packet" */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	task->rxwr_txrd.var_ctx.rx_id = 0xffff;
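	/*
	 * For reads with only one or two buffer descriptors, the SGEs are
	 * cached directly in the task context below, which presumably
	 * spares the chip a DMA fetch of the external SGL; larger lists
	 * fall back to the multi-SGE path.
	 */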

	/* Rx Only */
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	bd_count = bd_tbl->bd_valid;
	if (task_type == FCOE_TASK_TYPE_READ) {
		if (bd_count == 1) {

			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else if (bd_count == 2) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

			fcoe_bd_tbl++;
			cached_sge->second_buf_addr.lo =
						fcoe_bd_tbl->buf_addr_lo;
			cached_sge->second_buf_addr.hi =
						fcoe_bd_tbl->buf_addr_hi;
			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
			sgl->mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			sgl->mul_sgl.sgl_size = bd_count;
		}
	}
}

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -1;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -1;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
				     sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -1;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
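	/*
	 * Each regpair in the BD table points at one DMA-coherent page of
	 * task contexts; the firmware walks this table (whose base was
	 * passed down in the INIT1 KWQE) to locate any task by index.
	 */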
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -1;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {

			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
					  hba->task_ctx[i],
					  hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}

void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->task_ctx_bd_tbl,
				  hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						  hba->task_ctx[i],
						  hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	int hash_table_size;
	u32 *pbl;

	segment_count = hba->hash_tbl_segment_count;
	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		dma_addr_t dma_address;

		dma_address = le32_to_cpu(*pbl);
		++pbl;
		dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
		++pbl;
		dma_free_coherent(&hba->pcidev->dev,
				  BNX2FC_HASH_TBL_CHUNK_SIZE,
				  hba->hash_tbl_segments[i],
				  dma_address);
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}

static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			while (--i >= 0) {
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_segment_array[i]);
				hba->hash_tbl_segments[i] = NULL;
			}
			kfree(dma_segment_array);
			return -ENOMEM;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		kfree(dma_segment_array);
		return -ENOMEM;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
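
	/*
	 * Walk the PBL back over to sanity-check the entries just
	 * written; this looks like a leftover debug pass - it steps
	 * through each lo/hi pair and otherwise discards the values.
	 */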
	pbl = hba->hash_tbl_pbl;
	while (*pbl && *(pbl + 1))
		pbl += 2;

	kfree(dma_segment_array);
	return 0;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
		   sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
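	/*
	 * Chain the t2 hash table entries: each entry's "next" regpair
	 * holds the DMA address of the entry that follows it, so the
	 * firmware can traverse the table as a linked list.
	 */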
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			   sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}