2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * Maintained at www.Open-FCoE.org
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
28 * fc_rport's represent N_Port's within the fabric.
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
58 struct workqueue_struct *rport_event_queue;
60 static void fc_rport_enter_plogi(struct fc_rport *);
61 static void fc_rport_enter_prli(struct fc_rport *);
62 static void fc_rport_enter_rtv(struct fc_rport *);
63 static void fc_rport_enter_ready(struct fc_rport *);
64 static void fc_rport_enter_logo(struct fc_rport *);
66 static void fc_rport_recv_plogi_req(struct fc_rport *,
67 struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport *,
69 struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport *,
71 struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_rport *,
73 struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
/*
 * Human-readable names for each enum fc_rport_state value, indexed by the
 * state constant. Consumed by fc_rport_state() for debug messages.
 */
79 static const char *fc_rport_state_names[] = {
80 [RPORT_ST_INIT] = "Init",
81 [RPORT_ST_PLOGI] = "PLOGI",
82 [RPORT_ST_PRLI] = "PRLI",
83 [RPORT_ST_RTV] = "RTV",
84 [RPORT_ST_READY] = "Ready",
85 [RPORT_ST_LOGO] = "LOGO",
86 [RPORT_ST_DELETE] = "Delete",
/*
 * fc_rport_rogue_destroy() - device release callback for a "rogue" rport
 * @dev: embedded struct device of the rport being released
 *
 * Installed as rport->dev.release by fc_rport_rogue_create(); runs when the
 * last reference to the rogue rport's device is dropped.
 * NOTE(review): the body appears truncated in this view (freeing of the
 * rport is not visible) — confirm against the full file.
 */
89 static void fc_rport_rogue_destroy(struct device *dev)
91 struct fc_rport *rport = dev_to_rport(dev);
92 FC_RPORT_DBG(rport, "Destroying rogue rport\n");
/*
 * fc_rport_rogue_create() - allocate a temporary ("rogue") rport
 * @dp: discovered-port identifiers and owning lport
 *
 * Allocates one chunk holding both the fc_rport and its libfc private data,
 * copies the discovered identifiers in, and initializes the private state
 * (mutex, INIT state, retry/event work items). The rogue rport stands in
 * until fc_rport_work() swaps it for a real transport-class rport.
 */
96 struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
98 struct fc_rport *rport;
99 struct fc_rport_libfc_priv *rdata;
100 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
105 rdata = RPORT_TO_PRIV(rport);
107 rport->dd_data = rdata;
108 rport->port_id = dp->ids.port_id;
109 rport->port_name = dp->ids.port_name;
110 rport->node_name = dp->ids.node_name;
111 rport->roles = dp->ids.roles;
112 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
114 * Note: all this libfc rogue rport code will be removed for
115 * upstream so it's fine that this is really ugly and hacky right now.
117 device_initialize(&rport->dev);
118 rport->dev.release = fc_rport_rogue_destroy;
120 mutex_init(&rdata->rp_mutex);
121 rdata->local_port = dp->lp;
122 rdata->trans_state = FC_PORTSTATE_ROGUE;
123 rdata->rp_state = RPORT_ST_INIT;
124 rdata->event = RPORT_EV_NONE;
125 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
/* Inherit the lport's current timeout values as a starting point. */
127 rdata->e_d_tov = dp->lp->e_d_tov;
128 rdata->r_a_tov = dp->lp->r_a_tov;
129 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
130 INIT_WORK(&rdata->event_work, fc_rport_work);
132 * For good measure, but not necessary as we should only
133 * add REAL rport to the lport list.
135 INIT_LIST_HEAD(&rdata->peers);
141 * fc_rport_state() - return a string for the state the rport is in
142 * @rport: The rport whose state we want to get a string for
 *
 * Looks the current rp_state up in fc_rport_state_names[].
144 static const char *fc_rport_state(struct fc_rport *rport)
147 struct fc_rport_libfc_priv *rdata = rport->dd_data;
149 cp = fc_rport_state_names[rdata->rp_state];
156 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
157 * @rport: Pointer to Fibre Channel remote port structure
158 * @timeout: timeout in seconds
 *
 * Adds 5 seconds of slack to a nonzero caller-supplied timeout; a default
 * of 30 seconds is used otherwise (guard condition not visible in this view).
160 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
163 rport->dev_loss_tmo = timeout + 5;
165 rport->dev_loss_tmo = 30;
167 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
170 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
171 * @flp: FLOGI payload structure
172 * @maxval: upper limit, may be less than what is in the service parameters
 *
 * Returns the smallest of @maxval, the common-service-parameter BB data
 * field, and the class-3 receive data field size, each clamped below by
 * FC_SP_MIN_MAX_PAYLOAD.
174 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
180 * Get max payload from the common service parameters and the
181 * class 3 receive data field size.
183 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
184 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
/* fl_cssp[3 - 1] is the class-3 service parameter page. */
186 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
187 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
193 * fc_rport_state_enter() - Change the rport's state
194 * @rport: The rport whose state should change
195 * @new: The new state of the rport
197 * Locking Note: Called with the rport lock held
 *
 * Records the new state; the comparison against the old state presumably
 * resets the retry counter on a real transition (reset not visible here).
199 static void fc_rport_state_enter(struct fc_rport *rport,
200 enum fc_rport_state new)
202 struct fc_rport_libfc_priv *rdata = rport->dd_data;
203 if (rdata->rp_state != new)
205 rdata->rp_state = new;
/*
 * fc_rport_work() - event_work handler, runs on rport_event_queue
 * @work: the event_work member of an fc_rport_libfc_priv
 *
 * Dispatches the pending rport event recorded in rdata->event:
 *
 * RPORT_EV_CREATED: registers a real rport with the FC transport class
 * (fc_remote_port_add), copies the rogue rport's negotiated parameters
 * into it, marks it READY, and drops the rogue rport's device reference.
 * Directory-server rports (FC_FID_DIR_SERV) skip the event callback.
 *
 * FAILED/LOGO/STOP: invokes the event callback, then either drops the
 * rogue reference or deletes the real transport rport and resets the
 * exchange manager in both directions for its port_id.
 *
 * Locking: takes rp_mutex to snapshot the event, and drops it before
 * calling into the transport class or the event callback.
 */
208 static void fc_rport_work(struct work_struct *work)
211 struct fc_rport_libfc_priv *rdata =
212 container_of(work, struct fc_rport_libfc_priv, event_work);
213 enum fc_rport_event event;
214 enum fc_rport_trans_state trans_state;
215 struct fc_lport *lport = rdata->local_port;
216 struct fc_rport_operations *rport_ops;
217 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
219 mutex_lock(&rdata->rp_mutex);
220 event = rdata->event;
221 rport_ops = rdata->ops;
223 if (event == RPORT_EV_CREATED) {
224 struct fc_rport *new_rport;
225 struct fc_rport_libfc_priv *new_rdata;
226 struct fc_rport_identifiers ids;
228 ids.port_id = rport->port_id;
229 ids.roles = rport->roles;
230 ids.port_name = rport->port_name;
231 ids.node_name = rport->node_name;
/* Consume the event before unlocking so a new event can be queued. */
233 rdata->event = RPORT_EV_NONE;
234 mutex_unlock(&rdata->rp_mutex);
236 new_rport = fc_remote_port_add(lport->host, 0, &ids);
239 * Switch from the rogue rport to the rport
240 * returned by the FC class.
242 new_rport->maxframe_size = rport->maxframe_size;
244 new_rdata = new_rport->dd_data;
245 new_rdata->e_d_tov = rdata->e_d_tov;
246 new_rdata->r_a_tov = rdata->r_a_tov;
247 new_rdata->ops = rdata->ops;
248 new_rdata->local_port = rdata->local_port;
249 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
250 new_rdata->trans_state = FC_PORTSTATE_REAL;
251 mutex_init(&new_rdata->rp_mutex);
252 INIT_DELAYED_WORK(&new_rdata->retry_work,
254 INIT_LIST_HEAD(&new_rdata->peers);
255 INIT_WORK(&new_rdata->event_work, fc_rport_work);
257 fc_rport_state_enter(new_rport, RPORT_ST_READY);
/* fc_remote_port_add() failure path: report and fail the rport. */
259 printk(KERN_WARNING "libfc: Failed to allocate "
260 " memory for rport (%6x)\n", ids.port_id);
261 event = RPORT_EV_FAILED;
263 if (rport->port_id != FC_FID_DIR_SERV)
264 if (rport_ops->event_callback)
265 rport_ops->event_callback(lport, rport,
/* Release the rogue rport; the real one replaces it from here on. */
267 put_device(&rport->dev);
269 rdata = new_rport->dd_data;
270 if (rport_ops->event_callback)
271 rport_ops->event_callback(lport, rport, event);
272 } else if ((event == RPORT_EV_FAILED) ||
273 (event == RPORT_EV_LOGO) ||
274 (event == RPORT_EV_STOP)) {
275 trans_state = rdata->trans_state;
276 mutex_unlock(&rdata->rp_mutex);
277 if (rport_ops->event_callback)
278 rport_ops->event_callback(lport, rport, event);
279 if (trans_state == FC_PORTSTATE_ROGUE)
280 put_device(&rport->dev);
282 port_id = rport->port_id;
283 fc_remote_port_delete(rport);
/* Reset exchanges in both directions for this remote port id. */
284 lport->tt.exch_mgr_reset(lport, 0, port_id);
285 lport->tt.exch_mgr_reset(lport, port_id, 0);
288 mutex_unlock(&rdata->rp_mutex);
292 * fc_rport_login() - Start the remote port login state machine
293 * @rport: Fibre Channel remote port
295 * Locking Note: Called without the rport lock held. This
296 * function will hold the rport lock, call an _enter_*
297 * function and then unlock the rport.
 *
 * Kicks off the login sequence by entering the PLOGI state.
299 int fc_rport_login(struct fc_rport *rport)
301 struct fc_rport_libfc_priv *rdata = rport->dd_data;
303 mutex_lock(&rdata->rp_mutex);
305 FC_RPORT_DBG(rport, "Login to port\n");
307 fc_rport_enter_plogi(rport);
309 mutex_unlock(&rdata->rp_mutex);
315 * fc_rport_enter_delete() - schedule a remote port to be deleted.
316 * @rport: Fibre Channel remote port
317 * @event: event to report as the reason for deletion
319 * Locking Note: Called with the rport lock held.
321 * Allow state change into DELETE only once.
323 * Call queue_work only if there's no event already pending.
324 * Set the new event so that the old pending event will not occur.
325 * Since we have the mutex, even if fc_rport_work() is already started,
326 * it'll see the new event.
328 static void fc_rport_enter_delete(struct fc_rport *rport,
329 enum fc_rport_event event)
331 struct fc_rport_libfc_priv *rdata = rport->dd_data;
/* Already being deleted - nothing more to do. */
333 if (rdata->rp_state == RPORT_ST_DELETE)
336 FC_RPORT_DBG(rport, "Delete port\n");
338 fc_rport_state_enter(rport, RPORT_ST_DELETE);
340 if (rdata->event == RPORT_EV_NONE)
341 queue_work(rport_event_queue, &rdata->event_work);
342 rdata->event = event;
346 * fc_rport_logoff() - Logoff and remove an rport
347 * @rport: Fibre Channel remote port to be removed
349 * Locking Note: Called without the rport lock held. This
350 * function will hold the rport lock, call an _enter_*
351 * function and then unlock the rport.
 *
 * Sends a LOGO to the peer, then schedules deletion with RPORT_EV_STOP.
 * A port already in the DELETE state is left alone.
353 int fc_rport_logoff(struct fc_rport *rport)
355 struct fc_rport_libfc_priv *rdata = rport->dd_data;
357 mutex_lock(&rdata->rp_mutex);
359 FC_RPORT_DBG(rport, "Remove port\n");
361 if (rdata->rp_state == RPORT_ST_DELETE) {
362 FC_RPORT_DBG(rport, "Port in Delete state, not removing\n");
363 mutex_unlock(&rdata->rp_mutex);
367 fc_rport_enter_logo(rport);
370 * Change the state to Delete so that we discard
373 fc_rport_enter_delete(rport, RPORT_EV_STOP);
374 mutex_unlock(&rdata->rp_mutex);
381 * fc_rport_enter_ready() - The rport is ready
382 * @rport: Fibre Channel remote port that is ready
384 * Locking Note: The rport lock is expected to be held before calling
 *
 * Enters the READY state and queues an RPORT_EV_CREATED event so that
 * fc_rport_work() registers the rport with the transport class.
387 static void fc_rport_enter_ready(struct fc_rport *rport)
389 struct fc_rport_libfc_priv *rdata = rport->dd_data;
391 fc_rport_state_enter(rport, RPORT_ST_READY);
393 FC_RPORT_DBG(rport, "Port is Ready\n");
395 if (rdata->event == RPORT_EV_NONE)
396 queue_work(rport_event_queue, &rdata->event_work);
397 rdata->event = RPORT_EV_CREATED;
401 * fc_rport_timeout() - Handler for the retry_work timer.
402 * @work: The work struct of the fc_rport_libfc_priv
404 * Locking Note: Called without the rport lock held. This
405 * function will hold the rport lock, call an _enter_*
406 * function and then unlock the rport.
 *
 * Re-issues the request appropriate to the current state; drops the
 * device reference taken when the delayed work was scheduled.
408 static void fc_rport_timeout(struct work_struct *work)
410 struct fc_rport_libfc_priv *rdata =
411 container_of(work, struct fc_rport_libfc_priv, retry_work.work);
412 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
414 mutex_lock(&rdata->rp_mutex);
416 switch (rdata->rp_state) {
418 fc_rport_enter_plogi(rport);
421 fc_rport_enter_prli(rport);
424 fc_rport_enter_rtv(rport);
427 fc_rport_enter_logo(rport);
431 case RPORT_ST_DELETE:
435 mutex_unlock(&rdata->rp_mutex);
/* Balance the get_device() done when the retry was scheduled. */
436 put_device(&rport->dev);
440 * fc_rport_error() - Error handler, called once retries have been exhausted
441 * @rport: The fc_rport object
442 * @fp: The frame pointer (may encode an error via PTR_ERR)
444 * Locking Note: The rport lock is expected to be held before
445 * calling this routine
447 static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
449 struct fc_rport_libfc_priv *rdata = rport->dd_data;
451 FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
452 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
454 switch (rdata->rp_state) {
/* Login-phase failures delete the rport ... */
458 fc_rport_enter_delete(rport, RPORT_EV_FAILED);
/* ... while (per the visible branch) some state falls back to READY. */
461 fc_rport_enter_ready(rport);
463 case RPORT_ST_DELETE:
471 * fc_rport_error_retry() - Error handler when retries are desired
472 * @rport: The fc_rport object
473 * @fp: The frame pointer
475 * If the error was an exchange timeout retry immediately,
476 * otherwise wait for E_D_TOV.
478 * Locking Note: The rport lock is expected to be held before
479 * calling this routine
481 static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
483 struct fc_rport_libfc_priv *rdata = rport->dd_data;
484 unsigned long delay = FC_DEF_E_D_TOV;
486 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
487 if (PTR_ERR(fp) == -FC_EX_CLOSED)
488 return fc_rport_error(rport, fp);
490 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
491 FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
492 PTR_ERR(fp), fc_rport_state(rport));
494 /* no additional delay on exchange timeouts */
495 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
/* Hold a device reference across the delayed retry. */
497 get_device(&rport->dev);
498 schedule_delayed_work(&rdata->retry_work, delay);
/* Retries exhausted: fall through to the terminal error handler. */
502 return fc_rport_error(rport, fp);
506 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
507 * @sp: current sequence in the PLOGI exchange
508 * @fp: response frame
509 * @rp_arg: Fibre Channel remote port
511 * Locking Note: This function will be called without the rport lock
512 * held, but it will lock, call an _enter_* function or fc_rport_error
513 * and then unlock the rport.
515 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
518 struct fc_rport *rport = rp_arg;
519 struct fc_rport_libfc_priv *rdata = rport->dd_data;
520 struct fc_lport *lport = rdata->local_port;
521 struct fc_els_flogi *plp = NULL;
527 mutex_lock(&rdata->rp_mutex);
529 FC_RPORT_DBG(rport, "Received a PLOGI response\n");
/* A response in any other state is stale - ignore it. */
531 if (rdata->rp_state != RPORT_ST_PLOGI) {
532 FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
533 "%s\n", fc_rport_state(rport));
540 fc_rport_error_retry(rport, fp);
544 op = fc_frame_payload_op(fp);
545 if (op == ELS_LS_ACC &&
546 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
/* Record the peer's names and negotiated timeouts from the LS_ACC. */
547 rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
548 rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
550 tov = ntohl(plp->fl_csp.sp_e_d_tov);
551 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
553 if (tov > rdata->e_d_tov)
554 rdata->e_d_tov = tov;
555 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
556 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
557 if (cssp_seq < csp_seq)
559 rdata->max_seq = csp_seq;
560 rport->maxframe_size =
561 fc_plogi_get_maxframe(plp, lport->mfs);
564 * If the rport is one of the well known addresses
565 * we skip PRLI and RTV and go straight to READY.
567 if (rport->port_id >= FC_FID_DOM_MGR)
568 fc_rport_enter_ready(rport);
570 fc_rport_enter_prli(rport);
572 fc_rport_error_retry(rport, fp);
577 mutex_unlock(&rdata->rp_mutex);
/* Drop the reference taken when the PLOGI was sent. */
578 put_device(&rport->dev);
582 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
583 * @rport: Fibre Channel remote port to send PLOGI to
585 * Locking Note: The rport lock is expected to be held before calling
588 static void fc_rport_enter_plogi(struct fc_rport *rport)
590 struct fc_rport_libfc_priv *rdata = rport->dd_data;
591 struct fc_lport *lport = rdata->local_port;
594 FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
595 fc_rport_state(rport));
597 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
/* Reset to the minimum until the PLOGI response negotiates a real MFS. */
599 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
600 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
602 fc_rport_error_retry(rport, fp);
605 rdata->e_d_tov = lport->e_d_tov;
607 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
608 fc_rport_plogi_resp, rport, lport->e_d_tov))
609 fc_rport_error_retry(rport, fp);
/* Hold a device reference for the in-flight exchange's response path. */
611 get_device(&rport->dev);
615 * fc_rport_prli_resp() - Process Login (PRLI) response handler
616 * @sp: current sequence in the PRLI exchange
617 * @fp: response frame
618 * @rp_arg: Fibre Channel remote port
620 * Locking Note: This function will be called without the rport lock
621 * held, but it will lock, call an _enter_* function or fc_rport_error
622 * and then unlock the rport.
624 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
627 struct fc_rport *rport = rp_arg;
628 struct fc_rport_libfc_priv *rdata = rport->dd_data;
630 struct fc_els_prli prli;
631 struct fc_els_spp spp;
633 u32 roles = FC_RPORT_ROLE_UNKNOWN;
637 mutex_lock(&rdata->rp_mutex);
639 FC_RPORT_DBG(rport, "Received a PRLI response\n");
/* Ignore stale responses arriving outside the PRLI state. */
641 if (rdata->rp_state != RPORT_ST_PRLI) {
642 FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
643 "%s\n", fc_rport_state(rport));
650 fc_rport_error_retry(rport, fp);
654 op = fc_frame_payload_op(fp);
655 if (op == ELS_LS_ACC) {
656 pp = fc_frame_payload_get(fp, sizeof(*pp));
657 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
/* Extract FCP roles and retry support from the service parameters. */
658 fcp_parm = ntohl(pp->spp.spp_params);
659 if (fcp_parm & FCP_SPPF_RETRY)
660 rdata->flags |= FC_RP_FLAGS_RETRY;
663 rport->supported_classes = FC_COS_CLASS3;
664 if (fcp_parm & FCP_SPPF_INIT_FCN)
665 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
666 if (fcp_parm & FCP_SPPF_TARG_FCN)
667 roles |= FC_RPORT_ROLE_FCP_TARGET;
669 rport->roles = roles;
670 fc_rport_enter_rtv(rport);
673 FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
674 fc_rport_enter_delete(rport, RPORT_EV_FAILED);
680 mutex_unlock(&rdata->rp_mutex);
/* Drop the reference taken when the PRLI was sent. */
681 put_device(&rport->dev);
685 * fc_rport_logo_resp() - Logout (LOGO) response handler
686 * @sp: current sequence in the LOGO exchange
687 * @fp: response frame
688 * @rp_arg: Fibre Channel remote port
690 * Locking Note: This function will be called without the rport lock
691 * held, but it will lock, call an _enter_* function or fc_rport_error
692 * and then unlock the rport.
694 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
697 struct fc_rport *rport = rp_arg;
698 struct fc_rport_libfc_priv *rdata = rport->dd_data;
701 mutex_lock(&rdata->rp_mutex);
703 FC_RPORT_DBG(rport, "Received a LOGO response\n");
705 if (rdata->rp_state != RPORT_ST_LOGO) {
706 FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
707 "%s\n", fc_rport_state(rport));
714 fc_rport_error_retry(rport, fp);
718 op = fc_frame_payload_op(fp);
719 if (op == ELS_LS_ACC) {
/* NOTE(review): LS_ACC to a LOGO re-enters RTV here rather than
 * completing the logout - looks suspicious; confirm against the
 * full source before relying on this path. */
720 fc_rport_enter_rtv(rport);
722 FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
723 fc_rport_enter_delete(rport, RPORT_EV_LOGO);
729 mutex_unlock(&rdata->rp_mutex);
730 put_device(&rport->dev);
734 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
735 * @rport: Fibre Channel remote port to send PRLI to
737 * Locking Note: The rport lock is expected to be held before calling
740 static void fc_rport_enter_prli(struct fc_rport *rport)
742 struct fc_rport_libfc_priv *rdata = rport->dd_data;
743 struct fc_lport *lport = rdata->local_port;
745 struct fc_els_prli prli;
746 struct fc_els_spp spp;
750 FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
751 fc_rport_state(rport));
753 fc_rport_state_enter(rport, RPORT_ST_PRLI);
755 fp = fc_frame_alloc(lport, sizeof(*pp));
757 fc_rport_error_retry(rport, fp);
761 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
762 fc_rport_prli_resp, rport, lport->e_d_tov))
763 fc_rport_error_retry(rport, fp);
/* Hold a device reference for the in-flight exchange's response path. */
765 get_device(&rport->dev);
769 * fc_rport_rtv_resp() - Request Timeout Value response handler
770 * @sp: current sequence in the RTV exchange
771 * @fp: response frame
772 * @rp_arg: Fibre Channel remote port
774 * Many targets don't seem to support this.
776 * Locking Note: This function will be called without the rport lock
777 * held, but it will lock, call an _enter_* function or fc_rport_error
778 * and then unlock the rport.
780 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
783 struct fc_rport *rport = rp_arg;
784 struct fc_rport_libfc_priv *rdata = rport->dd_data;
787 mutex_lock(&rdata->rp_mutex);
789 FC_RPORT_DBG(rport, "Received a RTV response\n");
791 if (rdata->rp_state != RPORT_ST_RTV) {
792 FC_RPORT_DBG(rport, "Received a RTV response, but in state "
793 "%s\n", fc_rport_state(rport));
/* RTV is optional; errors here are terminal, not retried. */
800 fc_rport_error(rport, fp);
804 op = fc_frame_payload_op(fp);
805 if (op == ELS_LS_ACC) {
806 struct fc_els_rtv_acc *rtv;
810 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
/* Adopt the peer's R_A_TOV/E_D_TOV, honoring the resolution flag. */
812 toq = ntohl(rtv->rtv_toq);
813 tov = ntohl(rtv->rtv_r_a_tov);
816 rdata->r_a_tov = tov;
817 tov = ntohl(rtv->rtv_e_d_tov);
818 if (toq & FC_ELS_RTV_EDRES)
822 rdata->e_d_tov = tov;
/* Whether or not RTV succeeded, the login sequence is complete. */
826 fc_rport_enter_ready(rport);
831 mutex_unlock(&rdata->rp_mutex);
832 put_device(&rport->dev);
836 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
837 * @rport: Fibre Channel remote port to send RTV to
839 * Locking Note: The rport lock is expected to be held before calling
842 static void fc_rport_enter_rtv(struct fc_rport *rport)
845 struct fc_rport_libfc_priv *rdata = rport->dd_data;
846 struct fc_lport *lport = rdata->local_port;
848 FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
849 fc_rport_state(rport));
851 fc_rport_state_enter(rport, RPORT_ST_RTV);
853 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
855 fc_rport_error_retry(rport, fp);
859 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
860 fc_rport_rtv_resp, rport, lport->e_d_tov))
861 fc_rport_error_retry(rport, fp);
/* Hold a device reference for the in-flight exchange's response path. */
863 get_device(&rport->dev);
867 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
868 * @rport: Fibre Channel remote port to send LOGO to
870 * Locking Note: The rport lock is expected to be held before calling
873 static void fc_rport_enter_logo(struct fc_rport *rport)
875 struct fc_rport_libfc_priv *rdata = rport->dd_data;
876 struct fc_lport *lport = rdata->local_port;
879 FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
880 fc_rport_state(rport));
882 fc_rport_state_enter(rport, RPORT_ST_LOGO);
884 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
886 fc_rport_error_retry(rport, fp);
890 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
891 fc_rport_logo_resp, rport, lport->e_d_tov))
892 fc_rport_error_retry(rport, fp);
/* Hold a device reference for the in-flight exchange's response path. */
894 get_device(&rport->dev);
899 * fc_rport_recv_req() - Receive a request from a rport
900 * @sp: current sequence in the exchange
901 * @fp: received request frame
902 * @rport: Fibre Channel remote port the request came from
904 * Locking Note: Called without the rport lock held. This
905 * function will hold the rport lock, call an _enter_*
906 * function and then unlock the rport.
 *
 * Dispatches incoming ELS requests (PLOGI/PRLI/PRLO/LOGO) to the
 * per-command handlers; RRQ and REC get a generic ELS response, and
 * anything else is rejected with ELS_RJT_UNSUP.
908 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
909 struct fc_rport *rport)
911 struct fc_rport_libfc_priv *rdata = rport->dd_data;
912 struct fc_lport *lport = rdata->local_port;
914 struct fc_frame_header *fh;
915 struct fc_seq_els_data els_data;
918 mutex_lock(&rdata->rp_mutex);
921 els_data.explan = ELS_EXPL_NONE;
922 els_data.reason = ELS_RJT_NONE;
924 fh = fc_frame_header_get(fp);
926 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
927 op = fc_frame_payload_op(fp);
930 fc_rport_recv_plogi_req(rport, sp, fp);
933 fc_rport_recv_prli_req(rport, sp, fp);
936 fc_rport_recv_prlo_req(rport, sp, fp);
939 fc_rport_recv_logo_req(rport, sp, fp);
943 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
947 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
/* Unsupported ELS command - reject it. */
950 els_data.reason = ELS_RJT_UNSUP;
951 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
956 mutex_unlock(&rdata->rp_mutex);
960 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
961 * @rport: Fibre Channel remote port that initiated PLOGI
962 * @sp: current sequence in the PLOGI exchange
963 * @rx_fp: PLOGI request frame
965 * Locking Note: The rport lock is expected to be held before calling
968 static void fc_rport_recv_plogi_req(struct fc_rport *rport,
969 struct fc_seq *sp, struct fc_frame *rx_fp)
971 struct fc_rport_libfc_priv *rdata = rport->dd_data;
972 struct fc_lport *lport = rdata->local_port;
973 struct fc_frame *fp = rx_fp;
975 struct fc_frame_header *fh;
976 struct fc_els_flogi *pl;
977 struct fc_seq_els_data rjt_data;
981 enum fc_els_rjt_reason reject = 0;
985 fh = fc_frame_header_get(fp);
987 FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
988 fc_rport_state(rport));
990 sid = ntoh24(fh->fh_s_id);
991 pl = fc_frame_payload_get(fp, sizeof(*pl));
993 FC_RPORT_DBG(rport, "Received PLOGI too short\n");
995 /* XXX TBD: send reject? */
999 wwpn = get_unaligned_be64(&pl->fl_wwpn);
1000 wwnn = get_unaligned_be64(&pl->fl_wwnn);
1003 * If the session was just created, possibly due to the incoming PLOGI,
1004 * set the state appropriately and accept the PLOGI.
1006 * If we had also sent a PLOGI, and if the received PLOGI is from a
1007 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1008 * "command already in progress".
1010 * XXX TBD: If the session was ready before, the PLOGI should result in
1011 * all outstanding exchanges being reset.
1013 switch (rdata->rp_state) {
1015 FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
1016 "- reject\n", (unsigned long long)wwpn);
1017 reject = ELS_RJT_UNSUP;
1019 case RPORT_ST_PLOGI:
1020 FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
/* WWPN tie-break: the lower-named port defers to the higher. */
1022 if (wwpn < lport->wwpn)
1023 reject = ELS_RJT_INPROG;
1026 case RPORT_ST_READY:
1027 FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
1028 "- ignored for now\n", rdata->rp_state);
1029 /* XXX TBD - should reset */
1031 case RPORT_ST_DELETE:
1033 FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
1034 "state %d\n", rdata->rp_state);
1041 rjt_data.reason = reject;
1042 rjt_data.explan = ELS_EXPL_NONE;
1043 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1046 fp = fc_frame_alloc(lport, sizeof(*pl));
/* Response frame allocation failed - reject as "unable". */
1049 rjt_data.reason = ELS_RJT_UNAB;
1050 rjt_data.explan = ELS_EXPL_NONE;
1051 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1054 sp = lport->tt.seq_start_next(sp);
1056 fc_rport_set_name(rport, wwpn, wwnn);
1059 * Get session payload size from incoming PLOGI.
1061 rport->maxframe_size =
1062 fc_plogi_get_maxframe(pl, lport->mfs);
1063 fc_frame_free(rx_fp);
1064 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1067 * Send LS_ACC. If this fails,
1068 * the originator should retry.
1070 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1071 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1072 ep = fc_seq_exch(sp);
1073 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1074 FC_TYPE_ELS, f_ctl, 0);
1075 lport->tt.seq_send(lport, sp, fp);
1076 if (rdata->rp_state == RPORT_ST_PLOGI)
1077 fc_rport_enter_prli(rport);
1083 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1084 * @rport: Fibre Channel remote port that initiated PRLI
1085 * @sp: current sequence in the PRLI exchange
1086 * @fp: PRLI request frame
1088 * Locking Note: The rport lock is exected to be held before calling
1091 static void fc_rport_recv_prli_req(struct fc_rport *rport,
1092 struct fc_seq *sp, struct fc_frame *rx_fp)
1094 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1095 struct fc_lport *lport = rdata->local_port;
1097 struct fc_frame *fp;
1098 struct fc_frame_header *fh;
1100 struct fc_els_prli prli;
1101 struct fc_els_spp spp;
1103 struct fc_els_spp *rspp; /* request service param page */
1104 struct fc_els_spp *spp; /* response spp */
1107 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1108 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1109 enum fc_els_spp_resp resp;
1110 struct fc_seq_els_data rjt_data;
1113 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1116 fh = fc_frame_header_get(rx_fp);
1118 FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
1119 fc_rport_state(rport));
1121 switch (rdata->rp_state) {
1123 case RPORT_ST_READY:
1124 reason = ELS_RJT_NONE;
1127 fc_frame_free(rx_fp);
1131 len = fr_len(rx_fp) - sizeof(*fh);
1132 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1134 reason = ELS_RJT_PROT;
1135 explan = ELS_EXPL_INV_LEN;
1137 plen = ntohs(pp->prli.prli_len);
1138 if ((plen % 4) != 0 || plen > len) {
1139 reason = ELS_RJT_PROT;
1140 explan = ELS_EXPL_INV_LEN;
1141 } else if (plen < len) {
1144 plen = pp->prli.prli_spp_len;
1145 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1146 plen > len || len < sizeof(*pp)) {
1147 reason = ELS_RJT_PROT;
1148 explan = ELS_EXPL_INV_LEN;
1152 if (reason != ELS_RJT_NONE ||
1153 (fp = fc_frame_alloc(lport, len)) == NULL) {
1154 rjt_data.reason = reason;
1155 rjt_data.explan = explan;
1156 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1158 sp = lport->tt.seq_start_next(sp);
1160 pp = fc_frame_payload_get(fp, len);
1163 pp->prli.prli_cmd = ELS_LS_ACC;
1164 pp->prli.prli_spp_len = plen;
1165 pp->prli.prli_len = htons(len);
1166 len -= sizeof(struct fc_els_prli);
1169 * Go through all the service parameter pages and build
1170 * response. If plen indicates longer SPP than standard,
1171 * use that. The entire response has been pre-cleared above.
1174 while (len >= plen) {
1175 spp->spp_type = rspp->spp_type;
1176 spp->spp_type_ext = rspp->spp_type_ext;
1177 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1178 resp = FC_SPP_RESP_ACK;
1179 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1180 resp = FC_SPP_RESP_NO_PA;
1181 switch (rspp->spp_type) {
1182 case 0: /* common to all FC-4 types */
1185 fcp_parm = ntohl(rspp->spp_params);
1186 if (fcp_parm * FCP_SPPF_RETRY)
1187 rdata->flags |= FC_RP_FLAGS_RETRY;
1188 rport->supported_classes = FC_COS_CLASS3;
1189 if (fcp_parm & FCP_SPPF_INIT_FCN)
1190 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1191 if (fcp_parm & FCP_SPPF_TARG_FCN)
1192 roles |= FC_RPORT_ROLE_FCP_TARGET;
1193 rport->roles = roles;
1196 htonl(lport->service_params);
1199 resp = FC_SPP_RESP_INVL;
1202 spp->spp_flags |= resp;
1204 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1205 spp = (struct fc_els_spp *)((char *)spp + plen);
1209 * Send LS_ACC. If this fails, the originator should retry.
1211 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1212 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1213 ep = fc_seq_exch(sp);
1214 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1215 FC_TYPE_ELS, f_ctl, 0);
1216 lport->tt.seq_send(lport, sp, fp);
1219 * Get lock and re-check state.
1221 switch (rdata->rp_state) {
1223 fc_rport_enter_ready(rport);
1225 case RPORT_ST_READY:
1231 fc_frame_free(rx_fp);
1235 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1236 * @rport: Fibre Channel remote port that initiated PRLO
1237 * @sp: current sequence in the PRLO exchange
1238 * @fp: PRLO request frame
1240 * Locking Note: The rport lock is expected to be held before calling
 *
 * PRLO is not supported here: unless the port is already being deleted,
 * the request is rejected with ELS_RJT_UNAB.
1243 static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1244 struct fc_frame *fp)
1246 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1247 struct fc_lport *lport = rdata->local_port;
1249 struct fc_frame_header *fh;
1250 struct fc_seq_els_data rjt_data;
1252 fh = fc_frame_header_get(fp);
1254 FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
1255 fc_rport_state(rport));
1257 if (rdata->rp_state == RPORT_ST_DELETE) {
1263 rjt_data.reason = ELS_RJT_UNAB;
1264 rjt_data.explan = ELS_EXPL_NONE;
1265 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1270 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1271 * @rport: Fibre Channel remote port that initiated LOGO
1272 * @sp: current sequence in the LOGO exchange
1273 * @fp: LOGO request frame
1275 * Locking Note: The rport lock is expected to be held before calling
 *
 * Acknowledges the LOGO with LS_ACC and schedules deletion of the rport
 * by queueing an RPORT_EV_LOGO event for fc_rport_work().
1278 static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1279 struct fc_frame *fp)
1281 struct fc_frame_header *fh;
1282 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1283 struct fc_lport *lport = rdata->local_port;
1285 fh = fc_frame_header_get(fp);
1287 FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
1288 fc_rport_state(rport));
1290 if (rdata->rp_state == RPORT_ST_DELETE) {
1295 rdata->event = RPORT_EV_LOGO;
1296 fc_rport_state_enter(rport, RPORT_ST_DELETE);
1297 queue_work(rport_event_queue, &rdata->event_work);
1299 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
/* fc_rport_flush_queue() - wait for all queued rport events to finish. */
1303 static void fc_rport_flush_queue(void)
1305 flush_workqueue(rport_event_queue);
/*
 * fc_rport_init() - install default rport template operations on an lport
 * @lport: local port to fill in
 *
 * Only fills entries the LLD has not already overridden.
 */
1308 int fc_rport_init(struct fc_lport *lport)
1310 if (!lport->tt.rport_create)
1311 lport->tt.rport_create = fc_rport_rogue_create;
1313 if (!lport->tt.rport_login)
1314 lport->tt.rport_login = fc_rport_login;
1316 if (!lport->tt.rport_logoff)
1317 lport->tt.rport_logoff = fc_rport_logoff;
1319 if (!lport->tt.rport_recv_req)
1320 lport->tt.rport_recv_req = fc_rport_recv_req;
1322 if (!lport->tt.rport_flush_queue)
1323 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1327 EXPORT_SYMBOL(fc_rport_init);
/* fc_setup_rport() - create the single-threaded rport event workqueue. */
1329 int fc_setup_rport(void)
1331 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1332 if (!rport_event_queue)
1336 EXPORT_SYMBOL(fc_setup_rport);
/* fc_destroy_rport() - tear down the rport event workqueue. */
1338 void fc_destroy_rport(void)
1340 destroy_workqueue(rport_event_queue);
1342 EXPORT_SYMBOL(fc_destroy_rport);
/*
 * fc_rport_terminate_io() - abort all active I/O for a remote port
 * @rport: remote port whose exchanges should be reset
 *
 * Resets the exchange manager in both directions (as originator and as
 * responder) for the rport's port_id.
 */
1344 void fc_rport_terminate_io(struct fc_rport *rport)
1346 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1347 struct fc_lport *lport = rdata->local_port;
1349 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1350 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1352 EXPORT_SYMBOL(fc_rport_terminate_io);