2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
45 #include <linux/atomic.h>
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
52 #include <scsi/scsi_transport_srp.h>
56 #define DRV_NAME "ib_srp"
57 #define PFX DRV_NAME ": "
58 #define DRV_VERSION "1.0"
59 #define DRV_RELDATE "July 1, 2013"
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
63 "v" DRV_VERSION " (" DRV_RELDATE ")");
64 MODULE_LICENSE("Dual BSD/GPL");
66 static unsigned int srp_sg_tablesize;
67 static unsigned int cmd_sg_entries;
68 static unsigned int indirect_sg_entries;
69 static bool allow_ext_sg;
70 static bool prefer_fr;
71 static bool register_always;
72 static int topspin_workarounds = 1;
74 module_param(srp_sg_tablesize, uint, 0444);
75 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
77 module_param(cmd_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(cmd_sg_entries,
79 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
81 module_param(indirect_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(indirect_sg_entries,
83 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85 module_param(allow_ext_sg, bool, 0444);
86 MODULE_PARM_DESC(allow_ext_sg,
87 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89 module_param(topspin_workarounds, int, 0444);
90 MODULE_PARM_DESC(topspin_workarounds,
91 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93 module_param(prefer_fr, bool, 0444);
94 MODULE_PARM_DESC(prefer_fr,
95 "Whether to use fast registration if both FMR and fast registration are supported");
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 "Use memory registration even for contiguous memory regions");
101 static struct kernel_param_ops srp_tmo_ops;
103 static int srp_reconnect_delay = 10;
104 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108 static int srp_fast_io_fail_tmo = 15;
109 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 MODULE_PARM_DESC(fast_io_fail_tmo,
112 "Number of seconds between the observation of a transport"
113 " layer error and failing all I/O. \"off\" means that this"
114 " functionality is disabled.");
116 static int srp_dev_loss_tmo = 600;
117 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 MODULE_PARM_DESC(dev_loss_tmo,
120 "Maximum number of seconds that the SRP transport should"
121 " insulate transport layer errors. After this time has been"
122 " exceeded the SCSI host is removed. Should be"
123 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
124 " if fast_io_fail_tmo has not been set. \"off\" means that"
125 " this functionality is disabled.");
127 static unsigned ch_count;
128 module_param(ch_count, uint, 0444);
129 MODULE_PARM_DESC(ch_count,
130 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132 static void srp_add_one(struct ib_device *device);
133 static void srp_remove_one(struct ib_device *device);
134 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
135 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
136 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138 static struct scsi_transport_template *ib_srp_transport_template;
139 static struct workqueue_struct *srp_remove_wq;
141 static struct ib_client srp_client = {
144 .remove = srp_remove_one
147 static struct ib_sa_client srp_sa_client;
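/*
 * get/set callbacks for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters. "off" is accepted and reported for a
 * disabled timeout; new values are validated with srp_tmo_valid().
 */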
149 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151 int tmo = *(int *)kp->arg;
154 return sprintf(buffer, "%d", tmo);
156 return sprintf(buffer, "off");
159 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
163 if (strncmp(val, "off", 3) != 0) {
164 res = kstrtoint(val, 0, &tmo);
170 if (kp->arg == &srp_reconnect_delay)
171 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
173 else if (kp->arg == &srp_fast_io_fail_tmo)
174 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
176 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
180 *(int *)kp->arg = tmo;
186 static struct kernel_param_ops srp_tmo_ops = {
191 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
193 return (struct srp_target_port *) host->hostdata;
196 static const char *srp_target_info(struct Scsi_Host *host)
198 return host_to_target(host)->target_name;
201 static int srp_target_is_topspin(struct srp_target_port *target)
203 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
204 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
206 return topspin_workarounds &&
207 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
208 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
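/*
 * Allocate an information unit (IU): the srp_iu structure itself, its data
 * buffer, and a DMA mapping of that buffer for the requested direction.
 */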
211 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
213 enum dma_data_direction direction)
217 iu = kmalloc(sizeof *iu, gfp_mask);
221 iu->buf = kzalloc(size, gfp_mask);
225 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
227 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
231 iu->direction = direction;
243 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
248 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
254 static void srp_qp_event(struct ib_event *event, void *context)
256 pr_debug("QP event %d\n", event->event);
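/*
 * Move a newly created QP to the INIT state: look up the P_Key index and
 * program the access flags and port number.
 */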
259 static int srp_init_qp(struct srp_target_port *target,
262 struct ib_qp_attr *attr;
265 attr = kmalloc(sizeof *attr, GFP_KERNEL);
269 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270 target->srp_host->port,
271 be16_to_cpu(target->pkey),
276 attr->qp_state = IB_QPS_INIT;
277 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278 IB_ACCESS_REMOTE_WRITE);
279 attr->port_num = target->srp_host->port;
281 ret = ib_modify_qp(qp, attr,
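/*
 * Allocate a new CM ID for the channel, release the old one and
 * reinitialize the path record fields from the target port parameters.
 */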
292 static int srp_new_cm_id(struct srp_rdma_ch *ch)
294 struct srp_target_port *target = ch->target;
295 struct ib_cm_id *new_cm_id;
297 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
299 if (IS_ERR(new_cm_id))
300 return PTR_ERR(new_cm_id);
303 ib_destroy_cm_id(ch->cm_id);
304 ch->cm_id = new_cm_id;
305 ch->path.sgid = target->sgid;
306 ch->path.dgid = target->orig_dgid;
307 ch->path.pkey = target->pkey;
308 ch->path.service_id = target->service_id;
313 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
315 struct srp_device *dev = target->srp_host->srp_dev;
316 struct ib_fmr_pool_param fmr_param;
318 memset(&fmr_param, 0, sizeof(fmr_param));
319 fmr_param.pool_size = target->scsi_host->can_queue;
320 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
322 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
323 fmr_param.page_shift = ilog2(dev->mr_page_size);
324 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
325 IB_ACCESS_REMOTE_WRITE |
326 IB_ACCESS_REMOTE_READ);
328 return ib_create_fmr_pool(dev->pd, &fmr_param);
332 * srp_destroy_fr_pool() - free the resources owned by a pool
333 * @pool: Fast registration pool to be destroyed.
335 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
338 struct srp_fr_desc *d;
343 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
345 ib_free_fast_reg_page_list(d->frpl);
353 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
354 * @device: IB device to allocate fast registration descriptors for.
355 * @pd: Protection domain associated with the FR descriptors.
356 * @pool_size: Number of descriptors to allocate.
357 * @max_page_list_len: Maximum fast registration work request page list length.
359 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
360 struct ib_pd *pd, int pool_size,
361 int max_page_list_len)
363 struct srp_fr_pool *pool;
364 struct srp_fr_desc *d;
366 struct ib_fast_reg_page_list *frpl;
367 int i, ret = -EINVAL;
372 pool = kzalloc(sizeof(struct srp_fr_pool) +
373 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
376 pool->size = pool_size;
377 pool->max_page_list_len = max_page_list_len;
378 spin_lock_init(&pool->lock);
379 INIT_LIST_HEAD(&pool->free_list);
381 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
382 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
388 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
394 list_add_tail(&d->entry, &pool->free_list);
401 srp_destroy_fr_pool(pool);
409 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
410 * @pool: Pool to obtain descriptor from.
412 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
414 struct srp_fr_desc *d = NULL;
417 spin_lock_irqsave(&pool->lock, flags);
418 if (!list_empty(&pool->free_list)) {
419 d = list_first_entry(&pool->free_list, typeof(*d), entry);
422 spin_unlock_irqrestore(&pool->lock, flags);
428 * srp_fr_pool_put() - put an FR descriptor back in the free list
429 * @pool: Pool the descriptor was allocated from.
430 * @desc: Pointer to an array of fast registration descriptor pointers.
431 * @n: Number of descriptors to put back.
433 * Note: The caller must already have queued an invalidation request for
434 * desc->mr->rkey before calling this function.
436 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
442 spin_lock_irqsave(&pool->lock, flags);
443 for (i = 0; i < n; i++)
444 list_add(&desc[i]->entry, &pool->free_list);
445 spin_unlock_irqrestore(&pool->lock, flags);
448 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
450 struct srp_device *dev = target->srp_host->srp_dev;
452 return srp_create_fr_pool(dev->dev, dev->pd,
453 target->scsi_host->can_queue,
454 dev->max_pages_per_mr);
458 * srp_destroy_qp() - destroy an RDMA queue pair
459 * @ch: SRP RDMA channel.
461 * Change a queue pair into the error state and wait until all receive
462 * completions have been processed before destroying it. This prevents
463 * the receive completion handler from accessing the queue pair while it
464 * is being destroyed.
466 static void srp_destroy_qp(struct srp_rdma_ch *ch)
468 struct srp_target_port *target = ch->target;
469 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
470 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
471 struct ib_recv_wr *bad_wr;
474 /* Destroying a QP and reusing ch->done is only safe if not connected */
475 WARN_ON_ONCE(target->connected);
477 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
478 WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
482 init_completion(&ch->done);
483 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
484 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
486 wait_for_completion(&ch->done);
489 ib_destroy_qp(ch->qp);
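/*
 * Create the IB resources for one RDMA channel: receive and send completion
 * queues, an RC queue pair and, depending on the device capabilities, either
 * a fast registration (FR) pool or an FMR pool.
 */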
492 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
494 struct srp_target_port *target = ch->target;
495 struct srp_device *dev = target->srp_host->srp_dev;
496 struct ib_qp_init_attr *init_attr;
497 struct ib_cq *recv_cq, *send_cq;
499 struct ib_fmr_pool *fmr_pool = NULL;
500 struct srp_fr_pool *fr_pool = NULL;
501 const int m = 1 + dev->use_fast_reg;
504 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
508 /* + 1 for SRP_LAST_WR_ID */
509 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
510 target->queue_size + 1, ch->comp_vector);
511 if (IS_ERR(recv_cq)) {
512 ret = PTR_ERR(recv_cq);
516 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
517 m * target->queue_size, ch->comp_vector);
518 if (IS_ERR(send_cq)) {
519 ret = PTR_ERR(send_cq);
523 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
525 init_attr->event_handler = srp_qp_event;
526 init_attr->cap.max_send_wr = m * target->queue_size;
527 init_attr->cap.max_recv_wr = target->queue_size + 1;
528 init_attr->cap.max_recv_sge = 1;
529 init_attr->cap.max_send_sge = 1;
530 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
531 init_attr->qp_type = IB_QPT_RC;
532 init_attr->send_cq = send_cq;
533 init_attr->recv_cq = recv_cq;
535 qp = ib_create_qp(dev->pd, init_attr);
541 ret = srp_init_qp(target, qp);
545 if (dev->use_fast_reg && dev->has_fr) {
546 fr_pool = srp_alloc_fr_pool(target);
547 if (IS_ERR(fr_pool)) {
548 ret = PTR_ERR(fr_pool);
549 shost_printk(KERN_WARNING, target->scsi_host, PFX
550 "FR pool allocation failed (%d)\n", ret);
554 srp_destroy_fr_pool(ch->fr_pool);
555 ch->fr_pool = fr_pool;
556 } else if (!dev->use_fast_reg && dev->has_fmr) {
557 fmr_pool = srp_alloc_fmr_pool(target);
558 if (IS_ERR(fmr_pool)) {
559 ret = PTR_ERR(fmr_pool);
560 shost_printk(KERN_WARNING, target->scsi_host, PFX
561 "FMR pool allocation failed (%d)\n", ret);
565 ib_destroy_fmr_pool(ch->fmr_pool);
566 ch->fmr_pool = fmr_pool;
572 ib_destroy_cq(ch->recv_cq);
574 ib_destroy_cq(ch->send_cq);
577 ch->recv_cq = recv_cq;
578 ch->send_cq = send_cq;
587 ib_destroy_cq(send_cq);
590 ib_destroy_cq(recv_cq);
598 * Note: this function may be called without srp_alloc_iu_bufs() having been
599 * invoked. Hence the ch->[rt]x_ring checks.
601 static void srp_free_ch_ib(struct srp_target_port *target,
602 struct srp_rdma_ch *ch)
604 struct srp_device *dev = target->srp_host->srp_dev;
611 ib_destroy_cm_id(ch->cm_id);
615 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
619 if (dev->use_fast_reg) {
621 srp_destroy_fr_pool(ch->fr_pool);
624 ib_destroy_fmr_pool(ch->fmr_pool);
627 ib_destroy_cq(ch->send_cq);
628 ib_destroy_cq(ch->recv_cq);
631 * Prevent the SCSI error handler from using this channel after it has
632 * been freed: the SCSI error handler may continue trying to perform
633 * recovery actions after scsi_remove_host() has returned.
639 ch->send_cq = ch->recv_cq = NULL;
642 for (i = 0; i < target->queue_size; ++i)
643 srp_free_iu(target->srp_host, ch->rx_ring[i]);
648 for (i = 0; i < target->queue_size; ++i)
649 srp_free_iu(target->srp_host, ch->tx_ring[i]);
655 static void srp_path_rec_completion(int status,
656 struct ib_sa_path_rec *pathrec,
659 struct srp_rdma_ch *ch = ch_ptr;
660 struct srp_target_port *target = ch->target;
664 shost_printk(KERN_ERR, target->scsi_host,
665 PFX "Got failed path rec status %d\n", status);
671 static int srp_lookup_path(struct srp_rdma_ch *ch)
673 struct srp_target_port *target = ch->target;
676 ch->path.numb_path = 1;
678 init_completion(&ch->done);
680 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
681 target->srp_host->srp_dev->dev,
682 target->srp_host->port,
684 IB_SA_PATH_REC_SERVICE_ID |
685 IB_SA_PATH_REC_DGID |
686 IB_SA_PATH_REC_SGID |
687 IB_SA_PATH_REC_NUMB_PATH |
689 SRP_PATH_REC_TIMEOUT_MS,
691 srp_path_rec_completion,
692 ch, &ch->path_query);
693 if (ch->path_query_id < 0)
694 return ch->path_query_id;
696 ret = wait_for_completion_interruptible(&ch->done);
701 shost_printk(KERN_WARNING, target->scsi_host,
702 PFX "Path record query failed\n");
707 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
709 struct srp_target_port *target = ch->target;
711 struct ib_cm_req_param param;
712 struct srp_login_req priv;
716 req = kzalloc(sizeof *req, GFP_KERNEL);
720 req->param.primary_path = &ch->path;
721 req->param.alternate_path = NULL;
722 req->param.service_id = target->service_id;
723 req->param.qp_num = ch->qp->qp_num;
724 req->param.qp_type = ch->qp->qp_type;
725 req->param.private_data = &req->priv;
726 req->param.private_data_len = sizeof req->priv;
727 req->param.flow_control = 1;
729 get_random_bytes(&req->param.starting_psn, 4);
730 req->param.starting_psn &= 0xffffff;
733 * Pick some arbitrary defaults here; we could make these
734 * module parameters if anyone cared about setting them.
736 req->param.responder_resources = 4;
737 req->param.remote_cm_response_timeout = 20;
738 req->param.local_cm_response_timeout = 20;
739 req->param.retry_count = target->tl_retry_count;
740 req->param.rnr_retry_count = 7;
741 req->param.max_cm_retries = 15;
743 req->priv.opcode = SRP_LOGIN_REQ;
745 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
746 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
747 SRP_BUF_FORMAT_INDIRECT);
748 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
749 SRP_MULTICHAN_SINGLE);
751 * In the published SRP specification (draft rev. 16a), the
752 * port identifier format is 8 bytes of ID extension followed
753 * by 8 bytes of GUID. Older drafts put the two halves in the
754 * opposite order, so that the GUID comes first.
756 * Targets conforming to these obsolete drafts can be
757 * recognized by the I/O Class they report.
759 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
760 memcpy(req->priv.initiator_port_id,
761 &target->sgid.global.interface_id, 8);
762 memcpy(req->priv.initiator_port_id + 8,
763 &target->initiator_ext, 8);
764 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
765 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
767 memcpy(req->priv.initiator_port_id,
768 &target->initiator_ext, 8);
769 memcpy(req->priv.initiator_port_id + 8,
770 &target->sgid.global.interface_id, 8);
771 memcpy(req->priv.target_port_id, &target->id_ext, 8);
772 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
776 * Topspin/Cisco SRP targets will reject our login unless we
777 * zero out the first 8 bytes of our initiator port ID and set
778 * the second 8 bytes to the local node GUID.
780 if (srp_target_is_topspin(target)) {
781 shost_printk(KERN_DEBUG, target->scsi_host,
782 PFX "Topspin/Cisco initiator port ID workaround "
783 "activated for target GUID %016llx\n",
784 (unsigned long long) be64_to_cpu(target->ioc_guid));
785 memset(req->priv.initiator_port_id, 0, 8);
786 memcpy(req->priv.initiator_port_id + 8,
787 &target->srp_host->srp_dev->dev->node_guid, 8);
790 status = ib_send_cm_req(ch->cm_id, &req->param);
797 static bool srp_queue_remove_work(struct srp_target_port *target)
799 bool changed = false;
801 spin_lock_irq(&target->lock);
802 if (target->state != SRP_TARGET_REMOVED) {
803 target->state = SRP_TARGET_REMOVED;
806 spin_unlock_irq(&target->lock);
809 queue_work(srp_remove_wq, &target->remove_work);
814 static bool srp_change_conn_state(struct srp_target_port *target,
817 bool changed = false;
819 spin_lock_irq(&target->lock);
820 if (target->connected != connected) {
821 target->connected = connected;
824 spin_unlock_irq(&target->lock);
829 static void srp_disconnect_target(struct srp_target_port *target)
831 struct srp_rdma_ch *ch;
834 if (srp_change_conn_state(target, false)) {
835 /* XXX should send SRP_I_LOGOUT request */
837 for (i = 0; i < target->ch_count; i++) {
839 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
840 shost_printk(KERN_DEBUG, target->scsi_host,
841 PFX "Sending CM DREQ failed\n");
847 static void srp_free_req_data(struct srp_target_port *target,
848 struct srp_rdma_ch *ch)
850 struct srp_device *dev = target->srp_host->srp_dev;
851 struct ib_device *ibdev = dev->dev;
852 struct srp_request *req;
855 if (!ch->target || !ch->req_ring)
858 for (i = 0; i < target->req_ring_size; ++i) {
859 req = &ch->req_ring[i];
860 if (dev->use_fast_reg)
863 kfree(req->fmr_list);
864 kfree(req->map_page);
865 if (req->indirect_dma_addr) {
866 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
867 target->indirect_size,
870 kfree(req->indirect_desc);
877 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
879 struct srp_target_port *target = ch->target;
880 struct srp_device *srp_dev = target->srp_host->srp_dev;
881 struct ib_device *ibdev = srp_dev->dev;
882 struct srp_request *req;
885 int i, ret = -ENOMEM;
887 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
892 for (i = 0; i < target->req_ring_size; ++i) {
893 req = &ch->req_ring[i];
894 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
898 if (srp_dev->use_fast_reg)
899 req->fr_list = mr_list;
901 req->fmr_list = mr_list;
902 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
903 sizeof(void *), GFP_KERNEL);
906 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
907 if (!req->indirect_desc)
910 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
911 target->indirect_size,
913 if (ib_dma_mapping_error(ibdev, dma_addr))
916 req->indirect_dma_addr = dma_addr;
925 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
926 * @shost: SCSI host whose attributes to remove from sysfs.
928 * Note: Any attributes defined in the host template that did not exist
929 * before this function was invoked will be ignored.
931 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
933 struct device_attribute **attr;
935 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
936 device_remove_file(&shost->shost_dev, *attr);
939 static void srp_remove_target(struct srp_target_port *target)
941 struct srp_rdma_ch *ch;
944 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
946 srp_del_scsi_host_attr(target->scsi_host);
947 srp_rport_get(target->rport);
948 srp_remove_host(target->scsi_host);
949 scsi_remove_host(target->scsi_host);
950 srp_stop_rport_timers(target->rport);
951 srp_disconnect_target(target);
952 for (i = 0; i < target->ch_count; i++) {
954 srp_free_ch_ib(target, ch);
956 cancel_work_sync(&target->tl_err_work);
957 srp_rport_put(target->rport);
958 for (i = 0; i < target->ch_count; i++) {
960 srp_free_req_data(target, ch);
965 spin_lock(&target->srp_host->target_lock);
966 list_del(&target->list);
967 spin_unlock(&target->srp_host->target_lock);
969 scsi_host_put(target->scsi_host);
972 static void srp_remove_work(struct work_struct *work)
974 struct srp_target_port *target =
975 container_of(work, struct srp_target_port, remove_work);
977 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
979 srp_remove_target(target);
982 static void srp_rport_delete(struct srp_rport *rport)
984 struct srp_target_port *target = rport->lld_data;
986 srp_queue_remove_work(target);
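/*
 * Connect one RDMA channel: resolve the path, send the login request and
 * handle the port redirect, DLID redirect and stale connection statuses
 * reported through ch->status by the CM event handler.
 */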
989 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
991 struct srp_target_port *target = ch->target;
994 WARN_ON_ONCE(!multich && target->connected);
996 target->qp_in_error = false;
998 ret = srp_lookup_path(ch);
1003 init_completion(&ch->done);
1004 ret = srp_send_req(ch, multich);
1007 ret = wait_for_completion_interruptible(&ch->done);
1012 * The CM event handling code will set status to
1013 * SRP_PORT_REDIRECT if we get a port redirect REJ
1014 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1015 * redirect REJ back.
1017 switch (ch->status) {
1019 srp_change_conn_state(target, true);
1022 case SRP_PORT_REDIRECT:
1023 ret = srp_lookup_path(ch);
1028 case SRP_DLID_REDIRECT:
1031 case SRP_STALE_CONN:
1032 shost_printk(KERN_ERR, target->scsi_host, PFX
1033 "giving up on stale connection\n");
1034 ch->status = -ECONNRESET;
1043 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1045 struct ib_send_wr *bad_wr;
1046 struct ib_send_wr wr = {
1047 .opcode = IB_WR_LOCAL_INV,
1048 .wr_id = LOCAL_INV_WR_ID_MASK,
1052 .ex.invalidate_rkey = rkey,
1055 return ib_post_send(ch->qp, &wr, &bad_wr);
1058 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1059 struct srp_rdma_ch *ch,
1060 struct srp_request *req)
1062 struct srp_target_port *target = ch->target;
1063 struct srp_device *dev = target->srp_host->srp_dev;
1064 struct ib_device *ibdev = dev->dev;
1067 if (!scsi_sglist(scmnd) ||
1068 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1069 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1072 if (dev->use_fast_reg) {
1073 struct srp_fr_desc **pfr;
1075 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1076 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1078 shost_printk(KERN_ERR, target->scsi_host, PFX
1079 "Queueing INV WR for rkey %#x failed (%d)\n",
1080 (*pfr)->mr->rkey, res);
1081 queue_work(system_long_wq,
1082 &target->tl_err_work);
1086 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1089 struct ib_pool_fmr **pfmr;
1091 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1092 ib_fmr_pool_unmap(*pfmr);
1095 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1096 scmnd->sc_data_direction);
1100 * srp_claim_req - Take ownership of the scmnd associated with a request.
1101 * @ch: SRP RDMA channel.
1102 * @req: SRP request.
1103 * @sdev: If not NULL, only take ownership for this SCSI device.
1104 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1105 * ownership of @req->scmnd if it equals @scmnd.
1108 * Either NULL or a pointer to the SCSI command the caller became owner of.
1110 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1111 struct srp_request *req,
1112 struct scsi_device *sdev,
1113 struct scsi_cmnd *scmnd)
1115 unsigned long flags;
1117 spin_lock_irqsave(&ch->lock, flags);
1119 (!sdev || req->scmnd->device == sdev) &&
1120 (!scmnd || req->scmnd == scmnd)) {
1126 spin_unlock_irqrestore(&ch->lock, flags);
1132 * srp_free_req() - Unmap data and add request to the free request list.
1133 * @ch: SRP RDMA channel.
1134 * @req: Request to be freed.
1135 * @scmnd: SCSI command associated with @req.
1136 * @req_lim_delta: Amount to be added to @target->req_lim.
1138 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1139 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1141 unsigned long flags;
1143 srp_unmap_data(scmnd, ch, req);
1145 spin_lock_irqsave(&ch->lock, flags);
1146 ch->req_lim += req_lim_delta;
1147 spin_unlock_irqrestore(&ch->lock, flags);
1150 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1151 struct scsi_device *sdev, int result)
1153 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1156 srp_free_req(ch, req, scmnd, 0);
1157 scmnd->result = result;
1158 scmnd->scsi_done(scmnd);
1162 static void srp_terminate_io(struct srp_rport *rport)
1164 struct srp_target_port *target = rport->lld_data;
1165 struct srp_rdma_ch *ch;
1166 struct Scsi_Host *shost = target->scsi_host;
1167 struct scsi_device *sdev;
1171 * Invoking srp_terminate_io() while srp_queuecommand() is running
1172 * is not safe. Hence the warning statement below.
1174 shost_for_each_device(sdev, shost)
1175 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1177 for (i = 0; i < target->ch_count; i++) {
1178 ch = &target->ch[i];
1180 for (j = 0; j < target->req_ring_size; ++j) {
1181 struct srp_request *req = &ch->req_ring[j];
1183 srp_finish_req(ch, req, NULL,
1184 DID_TRANSPORT_FAILFAST << 16);
1190 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1191 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1192 * srp_reset_device() or srp_reset_host() calls will occur while this function
1193 * is in progress. One way to achieve this is not to call this function
1194 * directly but to call srp_reconnect_rport() instead, since that function
1195 * serializes calls to this function via rport->mutex and also blocks
1196 * srp_queuecommand() calls before invoking this function.
1198 static int srp_rport_reconnect(struct srp_rport *rport)
1200 struct srp_target_port *target = rport->lld_data;
1201 struct srp_rdma_ch *ch;
1203 bool multich = false;
1205 srp_disconnect_target(target);
1207 if (target->state == SRP_TARGET_SCANNING)
1211 * Now get a new local CM ID so that we avoid confusing the target in
1212 * case things are really fouled up. Doing so also ensures that all CM
1213 * callbacks will have finished before a new QP is allocated.
1215 for (i = 0; i < target->ch_count; i++) {
1216 ch = &target->ch[i];
1219 ret += srp_new_cm_id(ch);
1221 for (i = 0; i < target->ch_count; i++) {
1222 ch = &target->ch[i];
1225 for (j = 0; j < target->req_ring_size; ++j) {
1226 struct srp_request *req = &ch->req_ring[j];
1228 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1231 for (i = 0; i < target->ch_count; i++) {
1232 ch = &target->ch[i];
1236 * Whether or not creating a new CM ID succeeded, create a new
1237 * QP. This guarantees that all completion callback function
1238 * invocations have finished before request resetting starts.
1240 ret += srp_create_ch_ib(ch);
1242 INIT_LIST_HEAD(&ch->free_tx);
1243 for (j = 0; j < target->queue_size; ++j)
1244 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1246 for (i = 0; i < target->ch_count; i++) {
1247 ch = &target->ch[i];
1248 if (ret || !ch->target) {
1253 ret = srp_connect_ch(ch, multich);
1258 shost_printk(KERN_INFO, target->scsi_host,
1259 PFX "reconnect succeeded\n");
1264 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1265 unsigned int dma_len, u32 rkey)
1267 struct srp_direct_buf *desc = state->desc;
1269 desc->va = cpu_to_be64(dma_addr);
1270 desc->key = cpu_to_be32(rkey);
1271 desc->len = cpu_to_be32(dma_len);
1273 state->total_len += dma_len;
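/*
 * Register the pages accumulated in @state through the channel's FMR pool
 * and add a corresponding memory descriptor.
 */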
1278 static int srp_map_finish_fmr(struct srp_map_state *state,
1279 struct srp_rdma_ch *ch)
1281 struct ib_pool_fmr *fmr;
1284 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1285 state->npages, io_addr);
1287 return PTR_ERR(fmr);
1289 *state->next_fmr++ = fmr;
1292 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
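/*
 * Register the pages accumulated in @state by posting a fast registration
 * work request and add a corresponding memory descriptor.
 */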
1297 static int srp_map_finish_fr(struct srp_map_state *state,
1298 struct srp_rdma_ch *ch)
1300 struct srp_target_port *target = ch->target;
1301 struct srp_device *dev = target->srp_host->srp_dev;
1302 struct ib_send_wr *bad_wr;
1303 struct ib_send_wr wr;
1304 struct srp_fr_desc *desc;
1307 desc = srp_fr_pool_get(ch->fr_pool);
1311 rkey = ib_inc_rkey(desc->mr->rkey);
1312 ib_update_fast_reg_key(desc->mr, rkey);
1314 memcpy(desc->frpl->page_list, state->pages,
1315 sizeof(state->pages[0]) * state->npages);
1317 memset(&wr, 0, sizeof(wr));
1318 wr.opcode = IB_WR_FAST_REG_MR;
1319 wr.wr_id = FAST_REG_WR_ID_MASK;
1320 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1321 wr.wr.fast_reg.page_list = desc->frpl;
1322 wr.wr.fast_reg.page_list_len = state->npages;
1323 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1324 wr.wr.fast_reg.length = state->dma_len;
1325 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1326 IB_ACCESS_REMOTE_READ |
1327 IB_ACCESS_REMOTE_WRITE);
1328 wr.wr.fast_reg.rkey = desc->mr->lkey;
1330 *state->next_fr++ = desc;
1333 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1336 return ib_post_send(ch->qp, &wr, &bad_wr);
1339 static int srp_finish_mapping(struct srp_map_state *state,
1340 struct srp_rdma_ch *ch)
1342 struct srp_target_port *target = ch->target;
1345 if (state->npages == 0)
1348 if (state->npages == 1 && !register_always)
1349 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1352 ret = target->srp_host->srp_dev->use_fast_reg ?
1353 srp_map_finish_fr(state, ch) :
1354 srp_map_finish_fmr(state, ch);
1364 static void srp_map_update_start(struct srp_map_state *state,
1365 struct scatterlist *sg, int sg_index,
1366 dma_addr_t dma_addr)
1368 state->unmapped_sg = sg;
1369 state->unmapped_index = sg_index;
1370 state->unmapped_addr = dma_addr;
1373 static int srp_map_sg_entry(struct srp_map_state *state,
1374 struct srp_rdma_ch *ch,
1375 struct scatterlist *sg, int sg_index,
1378 struct srp_target_port *target = ch->target;
1379 struct srp_device *dev = target->srp_host->srp_dev;
1380 struct ib_device *ibdev = dev->dev;
1381 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1382 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1391 * Once we're in direct map mode for a request, we don't
1392 * go back to FMR or FR mode, so no need to update anything
1393 * other than the descriptor.
1395 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1400 * Since not all RDMA HW drivers support non-zero page offsets for
1401 * FMR, if we start at an offset into a page, don't merge into the
1402 * current FMR mapping. Finish it out, and use the kernel's MR for this sg entry.
1405 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1406 dma_len > dev->mr_max_size) {
1407 ret = srp_finish_mapping(state, ch);
1411 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1412 srp_map_update_start(state, NULL, 0, 0);
1417 * If this is the first sg that will be mapped via FMR or via FR, save
1418 * our position. We need to know the first unmapped entry, its index,
1419 * and the first unmapped address within that entry to be able to
1420 * restart mapping after an error.
1422 if (!state->unmapped_sg)
1423 srp_map_update_start(state, sg, sg_index, dma_addr);
1426 unsigned offset = dma_addr & ~dev->mr_page_mask;
1427 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1428 ret = srp_finish_mapping(state, ch);
1432 srp_map_update_start(state, sg, sg_index, dma_addr);
1435 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1438 state->base_dma_addr = dma_addr;
1439 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1440 state->dma_len += len;
1446 * If the last entry of the MR wasn't a full page, then we need to
1447 * close it out and start a new one -- we can only merge at page boundaries.
1451 if (len != dev->mr_page_size) {
1452 ret = srp_finish_mapping(state, ch);
1454 srp_map_update_start(state, NULL, 0, 0);
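/*
 * Map a scatterlist via FMR or FR where possible; if memory registration of
 * an entry fails, back up to the first unmapped entry and fall back to
 * direct descriptors.
 */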
1459 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1460 struct srp_request *req, struct scatterlist *scat,
1463 struct srp_target_port *target = ch->target;
1464 struct srp_device *dev = target->srp_host->srp_dev;
1465 struct ib_device *ibdev = dev->dev;
1466 struct scatterlist *sg;
1470 state->desc = req->indirect_desc;
1471 state->pages = req->map_page;
1472 if (dev->use_fast_reg) {
1473 state->next_fr = req->fr_list;
1474 use_mr = !!ch->fr_pool;
1476 state->next_fmr = req->fmr_list;
1477 use_mr = !!ch->fmr_pool;
1480 for_each_sg(scat, sg, count, i) {
1481 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1483 * Memory registration failed, so backtrack to the
1484 * first unmapped entry and continue on without using
1485 * memory registration.
1487 dma_addr_t dma_addr;
1488 unsigned int dma_len;
1491 sg = state->unmapped_sg;
1492 i = state->unmapped_index;
1494 dma_addr = ib_sg_dma_address(ibdev, sg);
1495 dma_len = ib_sg_dma_len(ibdev, sg);
1496 dma_len -= (state->unmapped_addr - dma_addr);
1497 dma_addr = state->unmapped_addr;
1499 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1503 if (use_mr && srp_finish_mapping(state, ch))
1506 req->nmdesc = state->nmdesc;
1511 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1512 struct srp_request *req)
1514 struct srp_target_port *target = ch->target;
1515 struct scatterlist *scat;
1516 struct srp_cmd *cmd = req->cmd->buf;
1517 int len, nents, count;
1518 struct srp_device *dev;
1519 struct ib_device *ibdev;
1520 struct srp_map_state state;
1521 struct srp_indirect_buf *indirect_hdr;
1525 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1526 return sizeof (struct srp_cmd);
1528 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1529 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1530 shost_printk(KERN_WARNING, target->scsi_host,
1531 PFX "Unhandled data direction %d\n",
1532 scmnd->sc_data_direction);
1536 nents = scsi_sg_count(scmnd);
1537 scat = scsi_sglist(scmnd);
1539 dev = target->srp_host->srp_dev;
1542 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1543 if (unlikely(count == 0))
1546 fmt = SRP_DATA_DESC_DIRECT;
1547 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1549 if (count == 1 && !register_always) {
1551 * The midlayer only generated a single gather/scatter
1552 * entry, or DMA mapping coalesced everything to a
1553 * single entry. So a direct descriptor along with
1554 * the DMA MR suffices.
1556 struct srp_direct_buf *buf = (void *) cmd->add_data;
1558 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1559 buf->key = cpu_to_be32(target->rkey);
1560 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1567 * We have more than one scatter/gather entry, so build our indirect
1568 * descriptor table, trying to merge as many entries as we can.
1570 indirect_hdr = (void *) cmd->add_data;
1572 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1573 target->indirect_size, DMA_TO_DEVICE);
1575 memset(&state, 0, sizeof(state));
1576 srp_map_sg(&state, ch, req, scat, count);
1578 /* We've mapped the request, now pull as much of the indirect
1579 * descriptor table as we can into the command buffer. If this
1580 * target is not using an external indirect table, we are
1581 * guaranteed to fit into the command, as the SCSI layer won't
1582 * give us more S/G entries than we allow.
1584 if (state.ndesc == 1) {
1586 * Memory registration collapsed the sg-list into one entry,
1587 * so use a direct descriptor.
1589 struct srp_direct_buf *buf = (void *) cmd->add_data;
1591 *buf = req->indirect_desc[0];
1595 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1596 !target->allow_ext_sg)) {
1597 shost_printk(KERN_ERR, target->scsi_host,
1598 "Could not fit S/G list into SRP_CMD\n");
1602 count = min(state.ndesc, target->cmd_sg_cnt);
1603 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1605 fmt = SRP_DATA_DESC_INDIRECT;
1606 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1607 len += count * sizeof (struct srp_direct_buf);
1609 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1610 count * sizeof (struct srp_direct_buf));
1612 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1613 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1614 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1615 indirect_hdr->len = cpu_to_be32(state.total_len);
1617 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1618 cmd->data_out_desc_cnt = count;
1620 cmd->data_in_desc_cnt = count;
1622 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1626 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1627 cmd->buf_fmt = fmt << 4;
1635 * Return an IU and possibly a credit to the free pool
1637 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1638 enum srp_iu_type iu_type)
1640 unsigned long flags;
1642 spin_lock_irqsave(&ch->lock, flags);
1643 list_add(&iu->list, &ch->free_tx);
1644 if (iu_type != SRP_IU_RSP)
1646 spin_unlock_irqrestore(&ch->lock, flags);
1650 * Must be called with ch->lock held to protect req_lim and free_tx.
1651 * If the IU is not sent, it must be returned using srp_put_tx_iu().
1654 * An upper limit for the number of allocated information units for each
1655 * request type is:
1656 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1657 * more than Scsi_Host.can_queue requests.
1658 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1659 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1660 * one unanswered SRP request to an initiator.
1662 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1663 enum srp_iu_type iu_type)
1665 struct srp_target_port *target = ch->target;
1666 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1669 srp_send_completion(ch->send_cq, ch);
1671 if (list_empty(&ch->free_tx))
1674 /* Initiator responses to target requests do not consume credits */
1675 if (iu_type != SRP_IU_RSP) {
1676 if (ch->req_lim <= rsv) {
1677 ++target->zero_req_lim;
1684 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1685 list_del(&iu->list);
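/*
 * Post an IU on the send queue as a signaled send work request.
 */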
1689 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1691 struct srp_target_port *target = ch->target;
1693 struct ib_send_wr wr, *bad_wr;
1695 list.addr = iu->dma;
1697 list.lkey = target->lkey;
1700 wr.wr_id = (uintptr_t) iu;
1703 wr.opcode = IB_WR_SEND;
1704 wr.send_flags = IB_SEND_SIGNALED;
1706 return ib_post_send(ch->qp, &wr, &bad_wr);
1709 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1711 struct srp_target_port *target = ch->target;
1712 struct ib_recv_wr wr, *bad_wr;
1715 list.addr = iu->dma;
1716 list.length = iu->size;
1717 list.lkey = target->lkey;
1720 wr.wr_id = (uintptr_t) iu;
1724 return ib_post_recv(ch->qp, &wr, &bad_wr);
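/*
 * Process an SRP_RSP IU: complete either a task management request or the
 * SCSI command identified by the response tag, updating the request limit,
 * sense data and residual counts.
 */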
1727 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1729 struct srp_target_port *target = ch->target;
1730 struct srp_request *req;
1731 struct scsi_cmnd *scmnd;
1732 unsigned long flags;
1734 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1735 spin_lock_irqsave(&ch->lock, flags);
1736 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1737 spin_unlock_irqrestore(&ch->lock, flags);
1739 ch->tsk_mgmt_status = -1;
1740 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1741 ch->tsk_mgmt_status = rsp->data[3];
1742 complete(&ch->tsk_mgmt_done);
1744 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1746 req = (void *)scmnd->host_scribble;
1747 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1750 shost_printk(KERN_ERR, target->scsi_host,
1751 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1752 rsp->tag, ch - target->ch, ch->qp->qp_num);
1754 spin_lock_irqsave(&ch->lock, flags);
1755 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1756 spin_unlock_irqrestore(&ch->lock, flags);
1760 scmnd->result = rsp->status;
1762 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1763 memcpy(scmnd->sense_buffer, rsp->data +
1764 be32_to_cpu(rsp->resp_data_len),
1765 min_t(int, be32_to_cpu(rsp->sense_data_len),
1766 SCSI_SENSE_BUFFERSIZE));
1769 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1770 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1772 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1773 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1774 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1775 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1776 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1778 srp_free_req(ch, req, scmnd,
1779 be32_to_cpu(rsp->req_lim_delta));
1781 scmnd->host_scribble = NULL;
1782 scmnd->scsi_done(scmnd);
1786 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1789 struct srp_target_port *target = ch->target;
1790 struct ib_device *dev = target->srp_host->srp_dev->dev;
1791 unsigned long flags;
1795 spin_lock_irqsave(&ch->lock, flags);
1796 ch->req_lim += req_delta;
1797 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1798 spin_unlock_irqrestore(&ch->lock, flags);
1801 shost_printk(KERN_ERR, target->scsi_host, PFX
1802 "no IU available to send response\n");
1806 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1807 memcpy(iu->buf, rsp, len);
1808 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1810 err = srp_post_send(ch, iu, len);
1812 shost_printk(KERN_ERR, target->scsi_host, PFX
1813 "unable to post response: %d\n", err);
1814 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1820 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1821 struct srp_cred_req *req)
1823 struct srp_cred_rsp rsp = {
1824 .opcode = SRP_CRED_RSP,
1827 s32 delta = be32_to_cpu(req->req_lim_delta);
1829 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1830 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1831 "problems processing SRP_CRED_REQ\n");
1834 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1835 struct srp_aer_req *req)
1837 struct srp_target_port *target = ch->target;
1838 struct srp_aer_rsp rsp = {
1839 .opcode = SRP_AER_RSP,
1842 s32 delta = be32_to_cpu(req->req_lim_delta);
1844 shost_printk(KERN_ERR, target->scsi_host, PFX
1845 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1847 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1848 shost_printk(KERN_ERR, target->scsi_host, PFX
1849 "problems processing SRP_AER_REQ\n");
1852 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1854 struct srp_target_port *target = ch->target;
1855 struct ib_device *dev = target->srp_host->srp_dev->dev;
1856 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1860 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1863 opcode = *(u8 *) iu->buf;
1866 shost_printk(KERN_ERR, target->scsi_host,
1867 PFX "recv completion, opcode 0x%02x\n", opcode);
1868 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1869 iu->buf, wc->byte_len, true);
1874 srp_process_rsp(ch, iu->buf);
1878 srp_process_cred_req(ch, iu->buf);
1882 srp_process_aer_req(ch, iu->buf);
1886 /* XXX Handle target logout */
1887 shost_printk(KERN_WARNING, target->scsi_host,
1888 PFX "Got target logout request\n");
1892 shost_printk(KERN_WARNING, target->scsi_host,
1893 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1897 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1900 res = srp_post_recv(ch, iu);
1902 shost_printk(KERN_ERR, target->scsi_host,
1903 PFX "Recv failed with error code %d\n", res);
1907 * srp_tl_err_work() - handle a transport layer error
1908 * @work: Work structure embedded in an SRP target port.
1910 * Note: This function may get invoked before the rport has been created,
1911 * hence the target->rport test.
1913 static void srp_tl_err_work(struct work_struct *work)
1915 struct srp_target_port *target;
1917 target = container_of(work, struct srp_target_port, tl_err_work);
1919 srp_start_tl_fail_timers(target->rport);
1922 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1923 bool send_err, struct srp_rdma_ch *ch)
1925 struct srp_target_port *target = ch->target;
1927 if (wr_id == SRP_LAST_WR_ID) {
1928 complete(&ch->done);
1932 if (target->connected && !target->qp_in_error) {
1933 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1934 shost_printk(KERN_ERR, target->scsi_host, PFX
1935 "LOCAL_INV failed with status %d\n",
1937 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1938 shost_printk(KERN_ERR, target->scsi_host, PFX
1939 "FAST_REG_MR failed status %d\n",
1942 shost_printk(KERN_ERR, target->scsi_host,
1943 PFX "failed %s status %d for iu %p\n",
1944 send_err ? "send" : "receive",
1945 wc_status, (void *)(uintptr_t)wr_id);
1947 queue_work(system_long_wq, &target->tl_err_work);
1949 target->qp_in_error = true;
1952 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1954 struct srp_rdma_ch *ch = ch_ptr;
1957 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1958 while (ib_poll_cq(cq, 1, &wc) > 0) {
1959 if (likely(wc.status == IB_WC_SUCCESS)) {
1960 srp_handle_recv(ch, &wc);
1962 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1967 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1969 struct srp_rdma_ch *ch = ch_ptr;
1973 while (ib_poll_cq(cq, 1, &wc) > 0) {
1974 if (likely(wc.status == IB_WC_SUCCESS)) {
1975 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1976 list_add(&iu->list, &ch->free_tx);
1978 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
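/*
 * .queuecommand implementation: pick the RDMA channel from the block layer
 * tag, map the data buffer and post the SRP_CMD IU.
 */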
1983 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1985 struct srp_target_port *target = host_to_target(shost);
1986 struct srp_rport *rport = target->rport;
1987 struct srp_rdma_ch *ch;
1988 struct srp_request *req;
1990 struct srp_cmd *cmd;
1991 struct ib_device *dev;
1992 unsigned long flags;
1996 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1999 * The SCSI EH thread is the only context from which srp_queuecommand()
2000 * can get invoked for blocked devices (SDEV_BLOCK /
2001 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2002 * locking the rport mutex if invoked from inside the SCSI EH.
2005 mutex_lock(&rport->mutex);
2007 scmnd->result = srp_chkready(target->rport);
2008 if (unlikely(scmnd->result))
2011 WARN_ON_ONCE(scmnd->request->tag < 0);
2012 tag = blk_mq_unique_tag(scmnd->request);
2013 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2014 idx = blk_mq_unique_tag_to_tag(tag);
2015 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2016 dev_name(&shost->shost_gendev), tag, idx,
2017 target->req_ring_size);
2019 spin_lock_irqsave(&ch->lock, flags);
2020 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2021 spin_unlock_irqrestore(&ch->lock, flags);
2026 req = &ch->req_ring[idx];
2027 dev = target->srp_host->srp_dev->dev;
2028 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2031 scmnd->host_scribble = (void *) req;
2034 memset(cmd, 0, sizeof *cmd);
2036 cmd->opcode = SRP_CMD;
2037 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
2039 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2044 len = srp_map_data(scmnd, ch, req);
2046 shost_printk(KERN_ERR, target->scsi_host,
2047 PFX "Failed to map data (%d)\n", len);
2049 * If we ran out of memory descriptors (-ENOMEM) because an
2050 * application is queuing many requests with more than
2051 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2052 * to reduce queue depth temporarily.
2054 scmnd->result = len == -ENOMEM ?
2055 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2059 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2062 if (srp_post_send(ch, iu, len)) {
2063 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2071 mutex_unlock(&rport->mutex);
2076 srp_unmap_data(scmnd, ch, req);
2079 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2082 * Prevent the loops that iterate over the request ring from
2083 * encountering a dangling SCSI command pointer.
2088 if (scmnd->result) {
2089 scmnd->scsi_done(scmnd);
2092 ret = SCSI_MLQUEUE_HOST_BUSY;
2099 * Note: the resources allocated in this function are freed in srp_free_ch_ib().
2102 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2104 struct srp_target_port *target = ch->target;
2107 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2111 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2116 for (i = 0; i < target->queue_size; ++i) {
2117 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2119 GFP_KERNEL, DMA_FROM_DEVICE);
2120 if (!ch->rx_ring[i])
2124 for (i = 0; i < target->queue_size; ++i) {
2125 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2127 GFP_KERNEL, DMA_TO_DEVICE);
2128 if (!ch->tx_ring[i])
2131 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2137 for (i = 0; i < target->queue_size; ++i) {
2138 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2139 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2152 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2154 uint64_t T_tr_ns, max_compl_time_ms;
2155 uint32_t rq_tmo_jiffies;
2158 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2159 * table 91), both the QP timeout and the retry count have to be set
2160 * for RC QP's during the RTR to RTS transition.
2162 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2163 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2166 * Set target->rq_tmo_jiffies to one second more than the largest time
2167 * it can take before an error completion is generated. See also
2168 * C9-140..142 in the IBTA spec for more information about how to
2169 * convert the QP Local ACK Timeout value to nanoseconds.
2171 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2172 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2173 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2174 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2176 return rq_tmo_jiffies;
2179 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2180 struct srp_login_rsp *lrsp,
2181 struct srp_rdma_ch *ch)
2183 struct srp_target_port *target = ch->target;
2184 struct ib_qp_attr *qp_attr = NULL;
2189 if (lrsp->opcode == SRP_LOGIN_RSP) {
2190 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2191 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2194 * Reserve credits for task management so we don't
2195 * bounce requests back to the SCSI mid-layer.
2197 target->scsi_host->can_queue
2198 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2199 target->scsi_host->can_queue);
2200 target->scsi_host->cmd_per_lun
2201 = min_t(int, target->scsi_host->can_queue,
2202 target->scsi_host->cmd_per_lun);
2204 shost_printk(KERN_WARNING, target->scsi_host,
2205 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2211 ret = srp_alloc_iu_bufs(ch);
2217 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2221 qp_attr->qp_state = IB_QPS_RTR;
2222 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2226 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2230 for (i = 0; i < target->queue_size; i++) {
2231 struct srp_iu *iu = ch->rx_ring[i];
2233 ret = srp_post_recv(ch, iu);
2238 qp_attr->qp_state = IB_QPS_RTS;
2239 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2243 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2245 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2249 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2258 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2259 struct ib_cm_event *event,
2260 struct srp_rdma_ch *ch)
2262 struct srp_target_port *target = ch->target;
2263 struct Scsi_Host *shost = target->scsi_host;
2264 struct ib_class_port_info *cpi;
2267 switch (event->param.rej_rcvd.reason) {
2268 case IB_CM_REJ_PORT_CM_REDIRECT:
2269 cpi = event->param.rej_rcvd.ari;
2270 ch->path.dlid = cpi->redirect_lid;
2271 ch->path.pkey = cpi->redirect_pkey;
2272 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2273 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2275 ch->status = ch->path.dlid ?
2276 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2279 case IB_CM_REJ_PORT_REDIRECT:
2280 if (srp_target_is_topspin(target)) {
2282 * Topspin/Cisco SRP gateways incorrectly send
2283 * reject reason code 25 when they mean 24 (port redirect).
2286 memcpy(ch->path.dgid.raw,
2287 event->param.rej_rcvd.ari, 16);
2289 shost_printk(KERN_DEBUG, shost,
2290 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2291 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2292 be64_to_cpu(ch->path.dgid.global.interface_id));
2294 ch->status = SRP_PORT_REDIRECT;
2296 shost_printk(KERN_WARNING, shost,
2297 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2298 ch->status = -ECONNRESET;
2302 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2303 shost_printk(KERN_WARNING, shost,
2304 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2305 ch->status = -ECONNRESET;
2308 case IB_CM_REJ_CONSUMER_DEFINED:
2309 opcode = *(u8 *) event->private_data;
2310 if (opcode == SRP_LOGIN_REJ) {
2311 struct srp_login_rej *rej = event->private_data;
2312 u32 reason = be32_to_cpu(rej->reason);
2314 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2315 shost_printk(KERN_WARNING, shost,
2316 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2318 shost_printk(KERN_WARNING, shost, PFX
2319 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2321 target->orig_dgid.raw, reason);
2323 shost_printk(KERN_WARNING, shost,
2324 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2325 " opcode 0x%02x\n", opcode);
2326 ch->status = -ECONNRESET;
2329 case IB_CM_REJ_STALE_CONN:
2330 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2331 ch->status = SRP_STALE_CONN;
2335 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2336 event->param.rej_rcvd.reason);
2337 ch->status = -ECONNRESET;
2341 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2343 struct srp_rdma_ch *ch = cm_id->context;
2344 struct srp_target_port *target = ch->target;
2347 switch (event->event) {
2348 case IB_CM_REQ_ERROR:
2349 shost_printk(KERN_DEBUG, target->scsi_host,
2350 PFX "Sending CM REQ failed\n");
2352 ch->status = -ECONNRESET;
2355 case IB_CM_REP_RECEIVED:
2357 srp_cm_rep_handler(cm_id, event->private_data, ch);
2360 case IB_CM_REJ_RECEIVED:
2361 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2364 srp_cm_rej_handler(cm_id, event, ch);
2367 case IB_CM_DREQ_RECEIVED:
2368 shost_printk(KERN_WARNING, target->scsi_host,
2369 PFX "DREQ received - connection closed\n");
2370 srp_change_conn_state(target, false);
2371 if (ib_send_cm_drep(cm_id, NULL, 0))
2372 shost_printk(KERN_ERR, target->scsi_host,
2373 PFX "Sending CM DREP failed\n");
2374 queue_work(system_long_wq, &target->tl_err_work);
2377 case IB_CM_TIMEWAIT_EXIT:
2378 shost_printk(KERN_ERR, target->scsi_host,
2379 PFX "connection closed\n");
2385 case IB_CM_MRA_RECEIVED:
2386 case IB_CM_DREQ_ERROR:
2387 case IB_CM_DREP_RECEIVED:
2391 shost_printk(KERN_WARNING, target->scsi_host,
2392 PFX "Unhandled CM event %d\n", event->event);
2397 complete(&ch->done);
2403 * srp_change_queue_depth - set the queue depth of a SCSI device
2404 * @sdev: scsi device struct
2405 * @qdepth: requested queue depth
2407 * Returns queue depth.
2410 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2412 if (!sdev->tagged_supported)
2414 return scsi_change_queue_depth(sdev, qdepth);
2417 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2418 unsigned int lun, u8 func)
2420 struct srp_target_port *target = ch->target;
2421 struct srp_rport *rport = target->rport;
2422 struct ib_device *dev = target->srp_host->srp_dev->dev;
2424 struct srp_tsk_mgmt *tsk_mgmt;
2426 if (!target->connected || target->qp_in_error)
2429 init_completion(&ch->tsk_mgmt_done);
2432 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2433 * invoked while a task management function is being sent.
2435 mutex_lock(&rport->mutex);
2436 spin_lock_irq(&ch->lock);
2437 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2438 spin_unlock_irq(&ch->lock);
2441 mutex_unlock(&rport->mutex);
2446 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2449 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2451 tsk_mgmt->opcode = SRP_TSK_MGMT;
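/* Place the LUN in the two most significant bytes of the big-endian
* 8-byte SRP LUN field. */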
2452 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2453 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2454 tsk_mgmt->tsk_mgmt_func = func;
2455 tsk_mgmt->task_tag = req_tag;
2457 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2459 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2460 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2461 mutex_unlock(&rport->mutex);
2465 mutex_unlock(&rport->mutex);
2467 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2468 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2474 static int srp_abort(struct scsi_cmnd *scmnd)
2476 struct srp_target_port *target = host_to_target(scmnd->device->host);
2477 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2480 struct srp_rdma_ch *ch;
2483 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
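/* The block layer tag encodes the hardware queue index; use it to find
* the RDMA channel on which the command was submitted. */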
2487 tag = blk_mq_unique_tag(scmnd->request);
2488 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2489 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2491 ch = &target->ch[ch_idx];
2492 if (!srp_claim_req(ch, req, NULL, scmnd))
2494 shost_printk(KERN_ERR, target->scsi_host,
2495 "Sending SRP abort for tag %#x\n", tag);
2496 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2497 SRP_TSK_ABORT_TASK) == 0)
2499 else if (target->rport->state == SRP_RPORT_LOST)
2503 srp_free_req(ch, req, scmnd, 0);
2504 scmnd->result = DID_ABORT << 16;
2505 scmnd->scsi_done(scmnd);
2510 static int srp_reset_device(struct scsi_cmnd *scmnd)
2512 struct srp_target_port *target = host_to_target(scmnd->device->host);
2513 struct srp_rdma_ch *ch;
int i, j;
2516 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2518 ch = &target->ch[0];
2519 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2522 if (ch->tsk_mgmt_status)
2525 for (i = 0; i < target->ch_count; i++) {
2526 ch = &target->ch[i];
2527 for (j = 0; j < target->req_ring_size; ++j) {
2528 struct srp_request *req = &ch->req_ring[j];
2530 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2537 static int srp_reset_host(struct scsi_cmnd *scmnd)
2539 struct srp_target_port *target = host_to_target(scmnd->device->host);
2541 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2543 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2546 static int srp_slave_configure(struct scsi_device *sdev)
2548 struct Scsi_Host *shost = sdev->host;
2549 struct srp_target_port *target = host_to_target(shost);
2550 struct request_queue *q = sdev->request_queue;
2551 unsigned long timeout;
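/* For disk devices, make the block layer request timeout at least 30
* seconds and no shorter than the transport-derived rq_tmo_jiffies. */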
2553 if (sdev->type == TYPE_DISK) {
2554 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2555 blk_queue_rq_timeout(q, timeout);
2561 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2564 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2566 return sprintf(buf, "0x%016llx\n",
2567 (unsigned long long) be64_to_cpu(target->id_ext));
2570 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2573 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2575 return sprintf(buf, "0x%016llx\n",
2576 (unsigned long long) be64_to_cpu(target->ioc_guid));
2579 static ssize_t show_service_id(struct device *dev,
2580 struct device_attribute *attr, char *buf)
2582 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2584 return sprintf(buf, "0x%016llx\n",
2585 (unsigned long long) be64_to_cpu(target->service_id));
2588 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2591 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2593 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2596 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2599 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2601 return sprintf(buf, "%pI6\n", target->sgid.raw);
2604 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2607 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2608 struct srp_rdma_ch *ch = &target->ch[0];
2610 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2613 static ssize_t show_orig_dgid(struct device *dev,
2614 struct device_attribute *attr, char *buf)
2616 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2618 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2621 static ssize_t show_req_lim(struct device *dev,
2622 struct device_attribute *attr, char *buf)
2624 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2625 struct srp_rdma_ch *ch;
2626 int i, req_lim = INT_MAX;
2628 for (i = 0; i < target->ch_count; i++) {
2629 ch = &target->ch[i];
2630 req_lim = min(req_lim, ch->req_lim);
2632 return sprintf(buf, "%d\n", req_lim);
2635 static ssize_t show_zero_req_lim(struct device *dev,
2636 struct device_attribute *attr, char *buf)
2638 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2640 return sprintf(buf, "%d\n", target->zero_req_lim);
2643 static ssize_t show_local_ib_port(struct device *dev,
2644 struct device_attribute *attr, char *buf)
2646 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2648 return sprintf(buf, "%d\n", target->srp_host->port);
2651 static ssize_t show_local_ib_device(struct device *dev,
2652 struct device_attribute *attr, char *buf)
2654 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2656 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2659 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2662 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2664 return sprintf(buf, "%d\n", target->ch_count);
2667 static ssize_t show_comp_vector(struct device *dev,
2668 struct device_attribute *attr, char *buf)
2670 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2672 return sprintf(buf, "%d\n", target->comp_vector);
2675 static ssize_t show_tl_retry_count(struct device *dev,
2676 struct device_attribute *attr, char *buf)
2678 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2680 return sprintf(buf, "%d\n", target->tl_retry_count);
2683 static ssize_t show_cmd_sg_entries(struct device *dev,
2684 struct device_attribute *attr, char *buf)
2686 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2688 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2691 static ssize_t show_allow_ext_sg(struct device *dev,
2692 struct device_attribute *attr, char *buf)
2694 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2696 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2699 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2700 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2701 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2702 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2703 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2704 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2705 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2706 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2707 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2708 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2709 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2710 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2711 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2712 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2713 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2714 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2716 static struct device_attribute *srp_host_attrs[] = {
2719 &dev_attr_service_id,
2723 &dev_attr_orig_dgid,
2725 &dev_attr_zero_req_lim,
2726 &dev_attr_local_ib_port,
2727 &dev_attr_local_ib_device,
2729 &dev_attr_comp_vector,
2730 &dev_attr_tl_retry_count,
2731 &dev_attr_cmd_sg_entries,
2732 &dev_attr_allow_ext_sg,
2736 static struct scsi_host_template srp_template = {
2737 .module = THIS_MODULE,
2738 .name = "InfiniBand SRP initiator",
2739 .proc_name = DRV_NAME,
2740 .slave_configure = srp_slave_configure,
2741 .info = srp_target_info,
2742 .queuecommand = srp_queuecommand,
2743 .change_queue_depth = srp_change_queue_depth,
2744 .eh_abort_handler = srp_abort,
2745 .eh_device_reset_handler = srp_reset_device,
2746 .eh_host_reset_handler = srp_reset_host,
2747 .skip_settle_delay = true,
2748 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2749 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2751 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2752 .use_clustering = ENABLE_CLUSTERING,
2753 .shost_attrs = srp_host_attrs,
2755 .track_queue_depth = 1,
2758 static int srp_sdev_count(struct Scsi_Host *host)
2760 struct scsi_device *sdev;
2763 shost_for_each_device(sdev, host)
2769 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2771 struct srp_rport_identifiers ids;
2772 struct srp_rport *rport;
2774 target->state = SRP_TARGET_SCANNING;
2775 sprintf(target->target_name, "SRP.T10:%016llX",
2776 (unsigned long long) be64_to_cpu(target->id_ext));
2778 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2781 memcpy(ids.port_id, &target->id_ext, 8);
2782 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2783 ids.roles = SRP_RPORT_ROLE_TARGET;
2784 rport = srp_rport_add(target->scsi_host, &ids);
2785 if (IS_ERR(rport)) {
2786 scsi_remove_host(target->scsi_host);
2787 return PTR_ERR(rport);
2790 rport->lld_data = target;
2791 target->rport = rport;
2793 spin_lock(&host->target_lock);
2794 list_add_tail(&target->list, &host->target_list);
2795 spin_unlock(&host->target_lock);
2797 scsi_scan_target(&target->scsi_host->shost_gendev,
2798 0, target->scsi_id, SCAN_WILD_CARD, 0);
2800 if (!target->connected || target->qp_in_error) {
2801 shost_printk(KERN_INFO, target->scsi_host,
2802 PFX "SCSI scan failed - removing SCSI host\n");
2803 srp_queue_remove_work(target);
2807 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2808 dev_name(&target->scsi_host->shost_gendev),
2809 srp_sdev_count(target->scsi_host));
2811 spin_lock_irq(&target->lock);
2812 if (target->state == SRP_TARGET_SCANNING)
2813 target->state = SRP_TARGET_LIVE;
2814 spin_unlock_irq(&target->lock);
2820 static void srp_release_dev(struct device *dev)
2822 struct srp_host *host =
2823 container_of(dev, struct srp_host, dev);
2825 complete(&host->released);
2828 static struct class srp_class = {
2829 .name = "infiniband_srp",
2830 .dev_release = srp_release_dev
2834 * srp_conn_unique() - check whether the connection to a target is unique
2836 * @target: SRP target port.
2838 static bool srp_conn_unique(struct srp_host *host,
2839 struct srp_target_port *target)
2841 struct srp_target_port *t;
2844 if (target->state == SRP_TARGET_REMOVED)
2849 spin_lock(&host->target_lock);
2850 list_for_each_entry(t, &host->target_list, list) {
2852 target->id_ext == t->id_ext &&
2853 target->ioc_guid == t->ioc_guid &&
2854 target->initiator_ext == t->initiator_ext) {
2859 spin_unlock(&host->target_lock);
2866 * Target ports are added by writing
2868 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2869 * pkey=<P_Key>,service_id=<service ID>
2871 * to the add_target sysfs attribute.
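*
* A hypothetical example (the HCA name "mlx4_0", the port number and all
* identifier values below are made-up placeholders):
*
* echo id_ext=200100e08b000001,ioc_guid=00117500005a0001,dgid=fe8000000000000000117500005a0001,pkey=ffff,service_id=00117500005a0001 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target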
2875 SRP_OPT_ID_EXT = 1 << 0,
2876 SRP_OPT_IOC_GUID = 1 << 1,
2877 SRP_OPT_DGID = 1 << 2,
2878 SRP_OPT_PKEY = 1 << 3,
2879 SRP_OPT_SERVICE_ID = 1 << 4,
2880 SRP_OPT_MAX_SECT = 1 << 5,
2881 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2882 SRP_OPT_IO_CLASS = 1 << 7,
2883 SRP_OPT_INITIATOR_EXT = 1 << 8,
2884 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2885 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2886 SRP_OPT_SG_TABLESIZE = 1 << 11,
2887 SRP_OPT_COMP_VECTOR = 1 << 12,
2888 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2889 SRP_OPT_QUEUE_SIZE = 1 << 14,
2890 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2894 SRP_OPT_SERVICE_ID),
2897 static const match_table_t srp_opt_tokens = {
2898 { SRP_OPT_ID_EXT, "id_ext=%s" },
2899 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2900 { SRP_OPT_DGID, "dgid=%s" },
2901 { SRP_OPT_PKEY, "pkey=%x" },
2902 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2903 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2904 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2905 { SRP_OPT_IO_CLASS, "io_class=%x" },
2906 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2907 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2908 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2909 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2910 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2911 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2912 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2913 { SRP_OPT_ERR, NULL }
2916 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2918 char *options, *sep_opt;
2921 substring_t args[MAX_OPT_ARGS];
2927 options = kstrdup(buf, GFP_KERNEL);
2932 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2936 token = match_token(p, srp_opt_tokens, args);
2940 case SRP_OPT_ID_EXT:
2941 p = match_strdup(args);
2946 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2950 case SRP_OPT_IOC_GUID:
2951 p = match_strdup(args);
2956 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2961 p = match_strdup(args);
2966 if (strlen(p) != 32) {
2967 pr_warn("bad dest GID parameter '%s'\n", p);
2972 for (i = 0; i < 16; ++i) {
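/* convert two hex characters at a time into one raw GID byte */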
2973 strlcpy(dgid, p + i * 2, sizeof(dgid));
2974 if (sscanf(dgid, "%hhx",
2975 &target->orig_dgid.raw[i]) < 1) {
2985 if (match_hex(args, &token)) {
2986 pr_warn("bad P_Key parameter '%s'\n", p);
2989 target->pkey = cpu_to_be16(token);
2992 case SRP_OPT_SERVICE_ID:
2993 p = match_strdup(args);
2998 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3002 case SRP_OPT_MAX_SECT:
3003 if (match_int(args, &token)) {
3004 pr_warn("bad max sect parameter '%s'\n", p);
3007 target->scsi_host->max_sectors = token;
3010 case SRP_OPT_QUEUE_SIZE:
3011 if (match_int(args, &token) || token < 1) {
3012 pr_warn("bad queue_size parameter '%s'\n", p);
3015 target->scsi_host->can_queue = token;
3016 target->queue_size = token + SRP_RSP_SQ_SIZE +
3017 SRP_TSK_MGMT_SQ_SIZE;
3018 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3019 target->scsi_host->cmd_per_lun = token;
3022 case SRP_OPT_MAX_CMD_PER_LUN:
3023 if (match_int(args, &token) || token < 1) {
3024 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3028 target->scsi_host->cmd_per_lun = token;
3031 case SRP_OPT_IO_CLASS:
3032 if (match_hex(args, &token)) {
3033 pr_warn("bad IO class parameter '%s'\n", p);
3036 if (token != SRP_REV10_IB_IO_CLASS &&
3037 token != SRP_REV16A_IB_IO_CLASS) {
3038 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3039 token, SRP_REV10_IB_IO_CLASS,
3040 SRP_REV16A_IB_IO_CLASS);
3043 target->io_class = token;
3046 case SRP_OPT_INITIATOR_EXT:
3047 p = match_strdup(args);
3052 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3056 case SRP_OPT_CMD_SG_ENTRIES:
3057 if (match_int(args, &token) || token < 1 || token > 255) {
3058 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3062 target->cmd_sg_cnt = token;
3065 case SRP_OPT_ALLOW_EXT_SG:
3066 if (match_int(args, &token)) {
3067 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3070 target->allow_ext_sg = !!token;
3073 case SRP_OPT_SG_TABLESIZE:
3074 if (match_int(args, &token) || token < 1 ||
3075 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3076 pr_warn("bad max sg_tablesize parameter '%s'\n",
3080 target->sg_tablesize = token;
3083 case SRP_OPT_COMP_VECTOR:
3084 if (match_int(args, &token) || token < 0) {
3085 pr_warn("bad comp_vector parameter '%s'\n", p);
3088 target->comp_vector = token;
3091 case SRP_OPT_TL_RETRY_COUNT:
3092 if (match_int(args, &token) || token < 2 || token > 7) {
3093 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3097 target->tl_retry_count = token;
3101 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3107 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3110 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3111 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3112 !(srp_opt_tokens[i].token & opt_mask))
3113 pr_warn("target creation request is missing parameter '%s'\n",
3114 srp_opt_tokens[i].pattern);
3116 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3117 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3118 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3119 target->scsi_host->cmd_per_lun,
3120 target->scsi_host->can_queue);
3127 static ssize_t srp_create_target(struct device *dev,
3128 struct device_attribute *attr,
3129 const char *buf, size_t count)
3131 struct srp_host *host =
3132 container_of(dev, struct srp_host, dev);
3133 struct Scsi_Host *target_host;
3134 struct srp_target_port *target;
3135 struct srp_rdma_ch *ch;
3136 struct srp_device *srp_dev = host->srp_dev;
3137 struct ib_device *ibdev = srp_dev->dev;
3138 int ret, node_idx, node, cpu, i;
3139 bool multich = false;
3141 target_host = scsi_host_alloc(&srp_template,
3142 sizeof (struct srp_target_port));
3146 target_host->transportt = ib_srp_transport_template;
3147 target_host->max_channel = 0;
3148 target_host->max_id = 1;
3149 target_host->max_lun = SRP_MAX_LUN;
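/* Limit the CDB length to the size of the cdb field in an SRP_CMD request. */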
3150 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3152 target = host_to_target(target_host);
3154 target->io_class = SRP_REV16A_IB_IO_CLASS;
3155 target->scsi_host = target_host;
3156 target->srp_host = host;
3157 target->lkey = host->srp_dev->mr->lkey;
3158 target->rkey = host->srp_dev->mr->rkey;
3159 target->cmd_sg_cnt = cmd_sg_entries;
3160 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3161 target->allow_ext_sg = allow_ext_sg;
3162 target->tl_retry_count = 7;
3163 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3166 * Prevent the SCSI host from being removed by srp_remove_target()
3167 * before this function returns.
3169 scsi_host_get(target->scsi_host);
3171 mutex_lock(&host->add_target_mutex);
3173 ret = srp_parse_options(buf, target);
3177 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3181 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3183 if (!srp_conn_unique(target->srp_host, target)) {
3184 shost_printk(KERN_INFO, target->scsi_host,
3185 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3186 be64_to_cpu(target->id_ext),
3187 be64_to_cpu(target->ioc_guid),
3188 be64_to_cpu(target->initiator_ext));
3193 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3194 target->cmd_sg_cnt < target->sg_tablesize) {
3195 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3196 target->sg_tablesize = target->cmd_sg_cnt;
3199 target_host->sg_tablesize = target->sg_tablesize;
3200 target->indirect_size = target->sg_tablesize *
3201 sizeof (struct srp_direct_buf);
3202 target->max_iu_len = sizeof (struct srp_cmd) +
3203 sizeof (struct srp_indirect_buf) +
3204 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3206 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3207 INIT_WORK(&target->remove_work, srp_remove_work);
3208 spin_lock_init(&target->lock);
3209 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3214 target->ch_count = max_t(unsigned, num_online_nodes(),
3216 min(4 * num_online_nodes(),
3217 ibdev->num_comp_vectors),
3218 num_online_cpus()));
3219 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
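/* Spread the channels evenly over the online NUMA nodes and give each
* channel a completion vector from its node's share of the vectors. */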
3225 for_each_online_node(node) {
3226 const int ch_start = (node_idx * target->ch_count /
3227 num_online_nodes());
3228 const int ch_end = ((node_idx + 1) * target->ch_count /
3229 num_online_nodes());
3230 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3231 num_online_nodes() + target->comp_vector)
3232 % ibdev->num_comp_vectors;
3233 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3234 num_online_nodes() + target->comp_vector)
3235 % ibdev->num_comp_vectors;
3238 for_each_online_cpu(cpu) {
3239 if (cpu_to_node(cpu) != node)
3241 if (ch_start + cpu_idx >= ch_end)
3243 ch = &target->ch[ch_start + cpu_idx];
3244 ch->target = target;
3245 ch->comp_vector = cv_start == cv_end ? cv_start :
3246 cv_start + cpu_idx % (cv_end - cv_start);
3247 spin_lock_init(&ch->lock);
3248 INIT_LIST_HEAD(&ch->free_tx);
3249 ret = srp_new_cm_id(ch);
3251 goto err_disconnect;
3253 ret = srp_create_ch_ib(ch);
3255 goto err_disconnect;
3257 ret = srp_alloc_req_data(ch);
3259 goto err_disconnect;
3261 ret = srp_connect_ch(ch, multich);
3263 shost_printk(KERN_ERR, target->scsi_host,
3264 PFX "Connection %d/%d failed\n",
3267 if (node_idx == 0 && cpu_idx == 0) {
3268 goto err_disconnect;
3270 srp_free_ch_ib(target, ch);
3271 srp_free_req_data(target, ch);
3272 target->ch_count = ch - target->ch;
3283 target->scsi_host->nr_hw_queues = target->ch_count;
3285 ret = srp_add_target(host, target);
3287 goto err_disconnect;
3289 if (target->state != SRP_TARGET_REMOVED) {
3290 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3291 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3292 be64_to_cpu(target->id_ext),
3293 be64_to_cpu(target->ioc_guid),
3294 be16_to_cpu(target->pkey),
3295 be64_to_cpu(target->service_id),
3296 target->sgid.raw, target->orig_dgid.raw);
3302 mutex_unlock(&host->add_target_mutex);
3304 scsi_host_put(target->scsi_host);
3309 srp_disconnect_target(target);
3311 for (i = 0; i < target->ch_count; i++) {
3312 ch = &target->ch[i];
3313 srp_free_ch_ib(target, ch);
3314 srp_free_req_data(target, ch);
3320 scsi_host_put(target_host);
3324 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3326 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3329 struct srp_host *host = container_of(dev, struct srp_host, dev);
3331 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3334 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3336 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3339 struct srp_host *host = container_of(dev, struct srp_host, dev);
3341 return sprintf(buf, "%d\n", host->port);
3344 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3346 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3348 struct srp_host *host;
3350 host = kzalloc(sizeof *host, GFP_KERNEL);
3354 INIT_LIST_HEAD(&host->target_list);
3355 spin_lock_init(&host->target_lock);
3356 init_completion(&host->released);
3357 mutex_init(&host->add_target_mutex);
3358 host->srp_dev = device;
3361 host->dev.class = &srp_class;
3362 host->dev.parent = device->dev->dma_device;
3363 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3365 if (device_register(&host->dev))
3367 if (device_create_file(&host->dev, &dev_attr_add_target))
3369 if (device_create_file(&host->dev, &dev_attr_ibdev))
3371 if (device_create_file(&host->dev, &dev_attr_port))
3377 device_unregister(&host->dev);
3385 static void srp_add_one(struct ib_device *device)
3387 struct srp_device *srp_dev;
3388 struct ib_device_attr *dev_attr;
3389 struct srp_host *host;
3390 int mr_page_shift, s, e, p;
3391 u64 max_pages_per_mr;
3393 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3397 if (ib_query_device(device, dev_attr)) {
3398 pr_warn("Query device failed for %s\n", device->name);
3402 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3406 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3407 device->map_phys_fmr && device->unmap_fmr);
3408 srp_dev->has_fr = (dev_attr->device_cap_flags &
3409 IB_DEVICE_MEM_MGT_EXTENSIONS);
3410 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3411 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3413 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3414 (!srp_dev->has_fmr || prefer_fr));
3417 * Use the smallest page size supported by the HCA, down to a
3418 * minimum of 4096 bytes. We're unlikely to build large sglists
3419 * out of smaller entries.
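* For example, a HCA that reports page_size_cap = 0xfffff000 (4 KiB and
* larger) yields mr_page_shift = 12 and hence a 4096-byte mr_page_size.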
3421 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3422 srp_dev->mr_page_size = 1 << mr_page_shift;
3423 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3424 max_pages_per_mr = dev_attr->max_mr_size;
3425 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3426 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3428 if (srp_dev->use_fast_reg) {
3429 srp_dev->max_pages_per_mr =
3430 min_t(u32, srp_dev->max_pages_per_mr,
3431 dev_attr->max_fast_reg_page_list_len);
3433 srp_dev->mr_max_size = srp_dev->mr_page_size *
3434 srp_dev->max_pages_per_mr;
3435 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3436 device->name, mr_page_shift, dev_attr->max_mr_size,
3437 dev_attr->max_fast_reg_page_list_len,
3438 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3440 INIT_LIST_HEAD(&srp_dev->dev_list);
3442 srp_dev->dev = device;
3443 srp_dev->pd = ib_alloc_pd(device);
3444 if (IS_ERR(srp_dev->pd))
3447 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3448 IB_ACCESS_LOCAL_WRITE |
3449 IB_ACCESS_REMOTE_READ |
3450 IB_ACCESS_REMOTE_WRITE);
3451 if (IS_ERR(srp_dev->mr))
3454 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3459 e = device->phys_port_cnt;
3462 for (p = s; p <= e; ++p) {
3463 host = srp_add_port(srp_dev, p);
3465 list_add_tail(&host->list, &srp_dev->dev_list);
3468 ib_set_client_data(device, &srp_client, srp_dev);
3473 ib_dealloc_pd(srp_dev->pd);
3482 static void srp_remove_one(struct ib_device *device)
3484 struct srp_device *srp_dev;
3485 struct srp_host *host, *tmp_host;
3486 struct srp_target_port *target;
3488 srp_dev = ib_get_client_data(device, &srp_client);
3492 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3493 device_unregister(&host->dev);
3495 * Wait for the sysfs entry to go away, so that no new
3496 * target ports can be created.
3498 wait_for_completion(&host->released);
3501 * Remove all target ports.
3503 spin_lock(&host->target_lock);
3504 list_for_each_entry(target, &host->target_list, list)
3505 srp_queue_remove_work(target);
3506 spin_unlock(&host->target_lock);
3509 * Wait for tl_err and target port removal tasks.
3511 flush_workqueue(system_long_wq);
3512 flush_workqueue(srp_remove_wq);
3517 ib_dereg_mr(srp_dev->mr);
3518 ib_dealloc_pd(srp_dev->pd);
3523 static struct srp_function_template ib_srp_transport_functions = {
3524 .has_rport_state = true,
3525 .reset_timer_if_blocked = true,
3526 .reconnect_delay = &srp_reconnect_delay,
3527 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3528 .dev_loss_tmo = &srp_dev_loss_tmo,
3529 .reconnect = srp_rport_reconnect,
3530 .rport_delete = srp_rport_delete,
3531 .terminate_rport_io = srp_terminate_io,
3534 static int __init srp_init_module(void)
3538 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3540 if (srp_sg_tablesize) {
3541 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3542 if (!cmd_sg_entries)
3543 cmd_sg_entries = srp_sg_tablesize;
3546 if (!cmd_sg_entries)
3547 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
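/* The data buffer descriptor count fields in an SRP_CMD request are
* 8 bits wide, so at most 255 gather/scatter entries can be described. */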
3549 if (cmd_sg_entries > 255) {
3550 pr_warn("Clamping cmd_sg_entries to 255\n");
3551 cmd_sg_entries = 255;
3554 if (!indirect_sg_entries)
3555 indirect_sg_entries = cmd_sg_entries;
3556 else if (indirect_sg_entries < cmd_sg_entries) {
3557 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3559 indirect_sg_entries = cmd_sg_entries;
3562 srp_remove_wq = create_workqueue("srp_remove");
3563 if (!srp_remove_wq) {
3569 ib_srp_transport_template =
3570 srp_attach_transport(&ib_srp_transport_functions);
3571 if (!ib_srp_transport_template)
3574 ret = class_register(&srp_class);
3576 pr_err("couldn't register class infiniband_srp\n");
3580 ib_sa_register_client(&srp_sa_client);
3582 ret = ib_register_client(&srp_client);
3584 pr_err("couldn't register IB client\n");
3592 ib_sa_unregister_client(&srp_sa_client);
3593 class_unregister(&srp_class);
3596 srp_release_transport(ib_srp_transport_template);
3599 destroy_workqueue(srp_remove_wq);
3603 static void __exit srp_cleanup_module(void)
3605 ib_unregister_client(&srp_client);
3606 ib_sa_unregister_client(&srp_sa_client);
3607 class_unregister(&srp_class);
3608 srp_release_transport(ib_srp_transport_template);
3609 destroy_workqueue(srp_remove_wq);
3612 module_init(srp_init_module);
3613 module_exit(srp_cleanup_module);