/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
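
	/*
	 * Note: an IOC GUID is an EUI-64, so its first three bytes carry
	 * the manufacturer's IEEE OUI; comparing just that prefix against
	 * the Topspin and Cisco OUIs is enough to identify these targets.
	 */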
	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
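	/*
	 * IB_SIGNAL_ALL_WR makes every send work request generate a
	 * completion; __srp_get_tx_iu() relies on this when it polls the
	 * send CQ to reclaim transmit IUs.
	 */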
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->send_cq;
	init_attr->recv_cq          = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	int ret;

	target->path.numb_path = 1;

	init_completion(&target->done);

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before srp_path_rec_completion() is called.
	 */
	if (!scsi_host_get(target->scsi_host))
		return -ENODEV;

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID |
						   IB_SA_PATH_REC_SGID |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	ret = target->path_query_id;
	if (ret < 0)
		goto out;

	wait_for_completion(&target->done);

	ret = target->status;
	if (ret < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

out:
	scsi_host_put(target->scsi_host);

	return ret;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &target->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = target->qp->qp_num;
	req->param.qp_type	      = target->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;
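	/* InfiniBand PSNs are 24 bits wide, hence the mask above. */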

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
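	/*
	 * Byte layout of the 16-byte SRP port identifiers, following the
	 * comment above (rev. 16a on the left, pre-rev-10 on the right):
	 *
	 *   bytes 0..7  : ID extension | GUID
	 *   bytes 8..15 : GUID         | ID extension
	 */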
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static bool srp_change_state(struct srp_target_port *target,
			     enum srp_target_state old,
			     enum srp_target_state new)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state == old) {
		target->state = new;
		changed = true;
	}
	spin_unlock_irq(&target->lock);
	return changed;
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = DID_RESET << 16;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct ib_wc wc;
	int i, ret;

	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
		return -EAGAIN;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
		ret = -EAGAIN;

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 *
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		queue_work(ib_wq, &target->work);
	}
	spin_unlock_irq(&target->lock);

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg    = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr  = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
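	/*
	 * Example: with 4 KiB FMR pages, an s/g entry starting 0x200 bytes
	 * into a page cannot be merged into the current FMR and is emitted
	 * as a direct descriptor instead.
	 */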
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}
	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;
	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va  = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
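	/*
	 * In the SRP_CMD buffer-format byte the high nibble describes the
	 * data-out format and the low nibble the data-in format, hence
	 * the shift for writes.
	 */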
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
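
/*
 * Both post functions above stash the IU pointer in the 64-bit wr_id of
 * the work request; the completion handlers cast it back.  The
 * BUILD_BUG_ON in srp_init_module() guarantees that a pointer fits.
 */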

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed receive status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		srp_handle_recv(target, &wc);
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed send status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
		list_add(&iu->list, &target->free_tx);
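		/*
		 * free_tx is updated without taking target->lock here:
		 * send completions are only reaped via __srp_get_tx_iu(),
		 * which is documented above to run with target->lock held.
		 */
	}
}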

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring[0]) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < SRP_RQ_SIZE; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
		return FAILED;
	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			  SRP_TSK_ABORT_TASK);
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return SUCCESS;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 * pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
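/*
 * Example (illustrative values only; the host directory name follows the
 * "srp-<device>-<port>" pattern used by srp_add_port() below):
 *
 *   echo id_ext=...,ioc_guid=...,dgid=...,pkey=ffff,service_id=... > \
 *        /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */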
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
	     max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
	     max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(&target->lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(&target->lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_workqueue(ib_wq);

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			srp_free_req_data(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
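	/*
	 * The check above enforces at build time that a struct srp_iu
	 * pointer fits in the 64-bit wr_id field; srp_post_send(),
	 * srp_post_recv() and the completion handlers depend on that
	 * round trip.
	 */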

	if (srp_sg_tablesize) {
		printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);