/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dev->dma_device,
				 iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dev->dma_device,
			 iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->cq;
	init_attr->recv_cq             = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	    |
						   IB_SA_PATH_REC_SGID	    |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
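	/*
	 * As an illustration (a sketch of the layouts the two memcpy
	 * blocks below produce, not text taken from the spec):
	 *
	 *   rev. 16a: bytes 0..7 = ID extension, bytes 8..15 = GUID
	 *   rev. 10:  bytes 0..7 = GUID,         bytes 8..15 = ID extension
	 */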
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       target->srp_host->initiator_port_id + 8, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       target->srp_host->initiator_port_id, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       target->srp_host->initiator_port_id, 16);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}
static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req);
}
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req, *tmp;
	struct ib_wc wc;
	int ret;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	spin_lock_irq(target->scsi_host->host_lock);
	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		srp_reset_req(target, req);
	spin_unlock_irq(target->scsi_host->host_lock);

	target->rx_head	= 0;
	target->tx_head	= 0;
	target->tx_tail	= 0;

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;

	if (!dev->fmr_pool)
		return -ENODEV;

	len = page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i) {
		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += sg_dma_len(&scat[i]);
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i)
		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
			   scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));
	} else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va  =
				cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->dev->mr->rkey);
			buf->desc_list[i].len =
				cpu_to_be32(sg_dma_len(&scat[i]));
			datalen += sg_dma_len(&scat[i]);
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}
static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next	 = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1))
		++target->zero_req_lim;

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}
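/*
 * Illustrative caller pattern for the pair of helpers above and below
 * (a sketch only; srp_queuecommand() is the real in-tree user):
 *
 *	spin_lock_irqsave(target->scsi_host->host_lock, flags);
 *	iu = __srp_get_tx_iu(target);
 *	if (iu) {
 *		... build the request in iu->buf ...
 *		__srp_post_send(target, iu, len);
 *	}
 *	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 */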
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				srp_max_iu_len, DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   srp_max_iu_len, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			printk(KERN_ERR PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag 	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}
static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}
static CLASS_DEVICE_ATTR(id_ext,	S_IRUGO, show_id_ext,		NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	S_IRUGO, show_ioc_guid,		NULL);
static CLASS_DEVICE_ATTR(service_id,	S_IRUGO, show_service_id,	NULL);
static CLASS_DEVICE_ATTR(pkey,		S_IRUGO, show_pkey,		NULL);
static CLASS_DEVICE_ATTR(dgid,		S_IRUGO, show_dgid,		NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	S_IRUGO, show_zero_req_lim,	NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_zero_req_lim,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		 (unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
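/*
 * For example (illustrative values only; the "srp-mthca0-1" name assumes
 * an mthca HCA, see srp_add_port() below for how it is formed):
 *
 *	echo -n id_ext=200400a0b8114527,ioc_guid=0002c90200402bd4,\
 *	dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *	service_id=200400a0b8114527 > \
 *	/sys/class/infiniband_srp/srp-mthca0-1/add_target
 */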
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_ERR,			NULL			}
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->io_class  = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}
static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);
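	/*
	 * For instance (illustrative, not a measured value): an HCA
	 * whose smallest supported page size is 4K has bit 12 as the
	 * lowest bit set in page_size_cap, so ffs() - 1 == 12, giving
	 * fmr_page_size = 4096 and fmr_page_mask = ~0xfffUL.
	 */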
	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static int __init srp_init_module(void)
{
	int ret;

	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);
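	/*
	 * The 16 in the line above is sizeof (struct srp_direct_buf)
	 * (an 8-byte VA plus a 4-byte key and a 4-byte length), so the
	 * IU can carry an indirect descriptor table with one entry per
	 * gather/scatter entry.
	 */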
	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);