2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
45 #include <linux/atomic.h>
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
52 #include <scsi/scsi_transport_srp.h>
56 #define DRV_NAME "ib_srp"
57 #define PFX DRV_NAME ": "
58 #define DRV_VERSION "1.0"
59 #define DRV_RELDATE "July 1, 2013"
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
63 "v" DRV_VERSION " (" DRV_RELDATE ")");
64 MODULE_LICENSE("Dual BSD/GPL");
66 static unsigned int srp_sg_tablesize;
67 static unsigned int cmd_sg_entries;
68 static unsigned int indirect_sg_entries;
69 static bool allow_ext_sg;
70 static bool prefer_fr;
71 static bool register_always;
72 static int topspin_workarounds = 1;
74 module_param(srp_sg_tablesize, uint, 0444);
75 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
77 module_param(cmd_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(cmd_sg_entries,
79 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
81 module_param(indirect_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(indirect_sg_entries,
83 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85 module_param(allow_ext_sg, bool, 0444);
86 MODULE_PARM_DESC(allow_ext_sg,
87 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89 module_param(topspin_workarounds, int, 0444);
90 MODULE_PARM_DESC(topspin_workarounds,
91 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93 module_param(prefer_fr, bool, 0444);
94 MODULE_PARM_DESC(prefer_fr,
95 "Whether to use fast registration if both FMR and fast registration are supported");
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 "Use memory registration even for contiguous memory regions");
101 static struct kernel_param_ops srp_tmo_ops;
103 static int srp_reconnect_delay = 10;
104 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108 static int srp_fast_io_fail_tmo = 15;
109 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 MODULE_PARM_DESC(fast_io_fail_tmo,
112 "Number of seconds between the observation of a transport"
113 " layer error and failing all I/O. \"off\" means that this"
114 " functionality is disabled.");
116 static int srp_dev_loss_tmo = 600;
117 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 MODULE_PARM_DESC(dev_loss_tmo,
120 "Maximum number of seconds that the SRP transport should"
121 " insulate transport layer errors. After this time has been"
122 " exceeded the SCSI host is removed. Should be"
123 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
124 " if fast_io_fail_tmo has not been set. \"off\" means that"
125 " this functionality is disabled.");
127 static unsigned ch_count;
128 module_param(ch_count, uint, 0444);
129 MODULE_PARM_DESC(ch_count,
130 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132 static void srp_add_one(struct ib_device *device);
133 static void srp_remove_one(struct ib_device *device);
134 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
135 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
136 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138 static struct scsi_transport_template *ib_srp_transport_template;
139 static struct workqueue_struct *srp_remove_wq;
141 static struct ib_client srp_client = {
144 .remove = srp_remove_one
147 static struct ib_sa_client srp_sa_client;
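/*
 * "get" handler for the reconnect_delay, fast_io_fail_tmo and dev_loss_tmo
 * module parameters: prints the numeric value, or "off" when the timeout is
 * disabled.
 */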
149 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151 int tmo = *(int *)kp->arg;
154 return sprintf(buffer, "%d", tmo);
156 return sprintf(buffer, "off");
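/*
 * "set" handler for the same parameters: accepts "off" or an integer and
 * validates the new value against the other two SRP timeouts via
 * srp_tmo_valid() before storing it.
 */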
159 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
163 if (strncmp(val, "off", 3) != 0) {
164 res = kstrtoint(val, 0, &tmo);
170 if (kp->arg == &srp_reconnect_delay)
171 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
173 else if (kp->arg == &srp_fast_io_fail_tmo)
174 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
176 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
180 *(int *)kp->arg = tmo;
186 static struct kernel_param_ops srp_tmo_ops = {
191 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
193 return (struct srp_target_port *) host->hostdata;
196 static const char *srp_target_info(struct Scsi_Host *host)
198 return host_to_target(host)->target_name;
201 static int srp_target_is_topspin(struct srp_target_port *target)
203 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
204 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
206 return topspin_workarounds &&
207 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
208 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
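/*
 * Allocate an SRP information unit (IU): the IU structure, its data buffer
 * and a DMA mapping of that buffer for the requested direction.
 */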
211 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
213 enum dma_data_direction direction)
217 iu = kmalloc(sizeof *iu, gfp_mask);
221 iu->buf = kzalloc(size, gfp_mask);
225 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
227 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
231 iu->direction = direction;
243 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
248 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
254 static void srp_qp_event(struct ib_event *event, void *context)
256 pr_debug("QP event %s (%d)\n",
257 ib_event_msg(event->event), event->event);
260 static int srp_init_qp(struct srp_target_port *target,
263 struct ib_qp_attr *attr;
266 attr = kmalloc(sizeof *attr, GFP_KERNEL);
270 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
271 target->srp_host->port,
272 be16_to_cpu(target->pkey),
277 attr->qp_state = IB_QPS_INIT;
278 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
279 IB_ACCESS_REMOTE_WRITE);
280 attr->port_num = target->srp_host->port;
282 ret = ib_modify_qp(qp, attr,
293 static int srp_new_cm_id(struct srp_rdma_ch *ch)
295 struct srp_target_port *target = ch->target;
296 struct ib_cm_id *new_cm_id;
298 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
300 if (IS_ERR(new_cm_id))
301 return PTR_ERR(new_cm_id);
304 ib_destroy_cm_id(ch->cm_id);
305 ch->cm_id = new_cm_id;
306 ch->path.sgid = target->sgid;
307 ch->path.dgid = target->orig_dgid;
308 ch->path.pkey = target->pkey;
309 ch->path.service_id = target->service_id;
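/*
 * Create an FMR pool for this target, sized to the SCSI host queue depth
 * (one mapping per outstanding command) with a dirty watermark of a quarter
 * of the pool size.
 */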
314 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
316 struct srp_device *dev = target->srp_host->srp_dev;
317 struct ib_fmr_pool_param fmr_param;
319 memset(&fmr_param, 0, sizeof(fmr_param));
320 fmr_param.pool_size = target->scsi_host->can_queue;
321 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
323 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
324 fmr_param.page_shift = ilog2(dev->mr_page_size);
325 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
326 IB_ACCESS_REMOTE_WRITE |
327 IB_ACCESS_REMOTE_READ);
329 return ib_create_fmr_pool(dev->pd, &fmr_param);
333 * srp_destroy_fr_pool() - free the resources owned by a pool
334 * @pool: Fast registration pool to be destroyed.
336 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
339 struct srp_fr_desc *d;
344 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
346 ib_free_fast_reg_page_list(d->frpl);
354 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
355 * @device: IB device to allocate fast registration descriptors for.
356 * @pd: Protection domain associated with the FR descriptors.
357 * @pool_size: Number of descriptors to allocate.
358 * @max_page_list_len: Maximum fast registration work request page list length.
360 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
361 struct ib_pd *pd, int pool_size,
362 int max_page_list_len)
364 struct srp_fr_pool *pool;
365 struct srp_fr_desc *d;
367 struct ib_fast_reg_page_list *frpl;
368 int i, ret = -EINVAL;
373 pool = kzalloc(sizeof(struct srp_fr_pool) +
374 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
377 pool->size = pool_size;
378 pool->max_page_list_len = max_page_list_len;
379 spin_lock_init(&pool->lock);
380 INIT_LIST_HEAD(&pool->free_list);
382 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
383 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
389 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
395 list_add_tail(&d->entry, &pool->free_list);
402 srp_destroy_fr_pool(pool);
410 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
411 * @pool: Pool to obtain descriptor from.
413 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
415 struct srp_fr_desc *d = NULL;
418 spin_lock_irqsave(&pool->lock, flags);
419 if (!list_empty(&pool->free_list)) {
420 d = list_first_entry(&pool->free_list, typeof(*d), entry);
423 spin_unlock_irqrestore(&pool->lock, flags);
429 * srp_fr_pool_put() - put an FR descriptor back in the free list
430 * @pool: Pool the descriptor was allocated from.
431 * @desc: Pointer to an array of fast registration descriptor pointers.
432 * @n: Number of descriptors to put back.
434 * Note: The caller must already have queued an invalidation request for
435 * desc->mr->rkey before calling this function.
437 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
443 spin_lock_irqsave(&pool->lock, flags);
444 for (i = 0; i < n; i++)
445 list_add(&desc[i]->entry, &pool->free_list);
446 spin_unlock_irqrestore(&pool->lock, flags);
449 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
451 struct srp_device *dev = target->srp_host->srp_dev;
453 return srp_create_fr_pool(dev->dev, dev->pd,
454 target->scsi_host->can_queue,
455 dev->max_pages_per_mr);
459 * srp_destroy_qp() - destroy an RDMA queue pair
460 * @ch: SRP RDMA channel.
462 * Change a queue pair into the error state and wait until all receive
463 * completions have been processed before destroying it. This prevents the
464 * receive completion handler from accessing the queue pair while it is
467 static void srp_destroy_qp(struct srp_rdma_ch *ch)
469 struct srp_target_port *target = ch->target;
470 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
471 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
472 struct ib_recv_wr *bad_wr;
475 /* Destroying a QP and reusing ch->done is only safe if not connected */
476 WARN_ON_ONCE(target->connected);
478 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
479 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
483 init_completion(&ch->done);
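/*
 * Post a sentinel receive WR (SRP_LAST_WR_ID). Its flush completion makes
 * the receive completion handler signal ch->done, which guarantees that all
 * earlier receive completions have been processed.
 */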
484 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
485 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
487 wait_for_completion(&ch->done);
490 ib_destroy_qp(ch->qp);
493 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
495 struct srp_target_port *target = ch->target;
496 struct srp_device *dev = target->srp_host->srp_dev;
497 struct ib_qp_init_attr *init_attr;
498 struct ib_cq *recv_cq, *send_cq;
500 struct ib_fmr_pool *fmr_pool = NULL;
501 struct srp_fr_pool *fr_pool = NULL;
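/*
 * With fast registration each SRP command may need an additional memory
 * registration work request, hence m == 2 in that case and the send queue
 * and send CQ are sized accordingly.
 */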
502 const int m = 1 + dev->use_fast_reg;
505 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
509 /* + 1 for SRP_LAST_WR_ID */
510 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
511 target->queue_size + 1, ch->comp_vector);
512 if (IS_ERR(recv_cq)) {
513 ret = PTR_ERR(recv_cq);
517 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
518 m * target->queue_size, ch->comp_vector);
519 if (IS_ERR(send_cq)) {
520 ret = PTR_ERR(send_cq);
524 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
526 init_attr->event_handler = srp_qp_event;
527 init_attr->cap.max_send_wr = m * target->queue_size;
528 init_attr->cap.max_recv_wr = target->queue_size + 1;
529 init_attr->cap.max_recv_sge = 1;
530 init_attr->cap.max_send_sge = 1;
531 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
532 init_attr->qp_type = IB_QPT_RC;
533 init_attr->send_cq = send_cq;
534 init_attr->recv_cq = recv_cq;
536 qp = ib_create_qp(dev->pd, init_attr);
542 ret = srp_init_qp(target, qp);
546 if (dev->use_fast_reg && dev->has_fr) {
547 fr_pool = srp_alloc_fr_pool(target);
548 if (IS_ERR(fr_pool)) {
549 ret = PTR_ERR(fr_pool);
550 shost_printk(KERN_WARNING, target->scsi_host, PFX
551 "FR pool allocation failed (%d)\n", ret);
555 srp_destroy_fr_pool(ch->fr_pool);
556 ch->fr_pool = fr_pool;
557 } else if (!dev->use_fast_reg && dev->has_fmr) {
558 fmr_pool = srp_alloc_fmr_pool(target);
559 if (IS_ERR(fmr_pool)) {
560 ret = PTR_ERR(fmr_pool);
561 shost_printk(KERN_WARNING, target->scsi_host, PFX
562 "FMR pool allocation failed (%d)\n", ret);
566 ib_destroy_fmr_pool(ch->fmr_pool);
567 ch->fmr_pool = fmr_pool;
573 ib_destroy_cq(ch->recv_cq);
575 ib_destroy_cq(ch->send_cq);
578 ch->recv_cq = recv_cq;
579 ch->send_cq = send_cq;
588 ib_destroy_cq(send_cq);
591 ib_destroy_cq(recv_cq);
599 * Note: this function may be called without srp_alloc_iu_bufs() having been
600 * invoked. Hence the ch->[rt]x_ring checks.
602 static void srp_free_ch_ib(struct srp_target_port *target,
603 struct srp_rdma_ch *ch)
605 struct srp_device *dev = target->srp_host->srp_dev;
612 ib_destroy_cm_id(ch->cm_id);
616 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
620 if (dev->use_fast_reg) {
622 srp_destroy_fr_pool(ch->fr_pool);
625 ib_destroy_fmr_pool(ch->fmr_pool);
628 ib_destroy_cq(ch->send_cq);
629 ib_destroy_cq(ch->recv_cq);
632 * Prevent the SCSI error handler from using this channel after it has
633 * been freed; the SCSI error handler may continue
634 * trying to perform recovery actions after scsi_remove_host()
640 ch->send_cq = ch->recv_cq = NULL;
643 for (i = 0; i < target->queue_size; ++i)
644 srp_free_iu(target->srp_host, ch->rx_ring[i]);
649 for (i = 0; i < target->queue_size; ++i)
650 srp_free_iu(target->srp_host, ch->tx_ring[i]);
656 static void srp_path_rec_completion(int status,
657 struct ib_sa_path_rec *pathrec,
660 struct srp_rdma_ch *ch = ch_ptr;
661 struct srp_target_port *target = ch->target;
665 shost_printk(KERN_ERR, target->scsi_host,
666 PFX "Got failed path rec status %d\n", status);
672 static int srp_lookup_path(struct srp_rdma_ch *ch)
674 struct srp_target_port *target = ch->target;
677 ch->path.numb_path = 1;
679 init_completion(&ch->done);
681 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
682 target->srp_host->srp_dev->dev,
683 target->srp_host->port,
685 IB_SA_PATH_REC_SERVICE_ID |
686 IB_SA_PATH_REC_DGID |
687 IB_SA_PATH_REC_SGID |
688 IB_SA_PATH_REC_NUMB_PATH |
690 SRP_PATH_REC_TIMEOUT_MS,
692 srp_path_rec_completion,
693 ch, &ch->path_query);
694 if (ch->path_query_id < 0)
695 return ch->path_query_id;
697 ret = wait_for_completion_interruptible(&ch->done);
702 shost_printk(KERN_WARNING, target->scsi_host,
703 PFX "Path record query failed\n");
708 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
710 struct srp_target_port *target = ch->target;
712 struct ib_cm_req_param param;
713 struct srp_login_req priv;
717 req = kzalloc(sizeof *req, GFP_KERNEL);
721 req->param.primary_path = &ch->path;
722 req->param.alternate_path = NULL;
723 req->param.service_id = target->service_id;
724 req->param.qp_num = ch->qp->qp_num;
725 req->param.qp_type = ch->qp->qp_type;
726 req->param.private_data = &req->priv;
727 req->param.private_data_len = sizeof req->priv;
728 req->param.flow_control = 1;
730 get_random_bytes(&req->param.starting_psn, 4);
731 req->param.starting_psn &= 0xffffff;
734 * Pick some arbitrary defaults here; we could make these
735 * module parameters if anyone cared about setting them.
737 req->param.responder_resources = 4;
738 req->param.remote_cm_response_timeout = 20;
739 req->param.local_cm_response_timeout = 20;
740 req->param.retry_count = target->tl_retry_count;
741 req->param.rnr_retry_count = 7;
742 req->param.max_cm_retries = 15;
744 req->priv.opcode = SRP_LOGIN_REQ;
746 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
747 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
748 SRP_BUF_FORMAT_INDIRECT);
749 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
750 SRP_MULTICHAN_SINGLE);
752 * In the published SRP specification (draft rev. 16a), the
753 * port identifier format is 8 bytes of ID extension followed
754 * by 8 bytes of GUID. Older drafts put the two halves in the
755 * opposite order, so that the GUID comes first.
757 * Targets conforming to these obsolete drafts can be
758 * recognized by the I/O Class they report.
760 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
761 memcpy(req->priv.initiator_port_id,
762 &target->sgid.global.interface_id, 8);
763 memcpy(req->priv.initiator_port_id + 8,
764 &target->initiator_ext, 8);
765 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
766 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
768 memcpy(req->priv.initiator_port_id,
769 &target->initiator_ext, 8);
770 memcpy(req->priv.initiator_port_id + 8,
771 &target->sgid.global.interface_id, 8);
772 memcpy(req->priv.target_port_id, &target->id_ext, 8);
773 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
777 * Topspin/Cisco SRP targets will reject our login unless we
778 * zero out the first 8 bytes of our initiator port ID and set
779 * the second 8 bytes to the local node GUID.
781 if (srp_target_is_topspin(target)) {
782 shost_printk(KERN_DEBUG, target->scsi_host,
783 PFX "Topspin/Cisco initiator port ID workaround "
784 "activated for target GUID %016llx\n",
785 (unsigned long long) be64_to_cpu(target->ioc_guid));
786 memset(req->priv.initiator_port_id, 0, 8);
787 memcpy(req->priv.initiator_port_id + 8,
788 &target->srp_host->srp_dev->dev->node_guid, 8);
791 status = ib_send_cm_req(ch->cm_id, &req->param);
798 static bool srp_queue_remove_work(struct srp_target_port *target)
800 bool changed = false;
802 spin_lock_irq(&target->lock);
803 if (target->state != SRP_TARGET_REMOVED) {
804 target->state = SRP_TARGET_REMOVED;
807 spin_unlock_irq(&target->lock);
810 queue_work(srp_remove_wq, &target->remove_work);
815 static bool srp_change_conn_state(struct srp_target_port *target,
818 bool changed = false;
820 spin_lock_irq(&target->lock);
821 if (target->connected != connected) {
822 target->connected = connected;
825 spin_unlock_irq(&target->lock);
830 static void srp_disconnect_target(struct srp_target_port *target)
832 struct srp_rdma_ch *ch;
835 if (srp_change_conn_state(target, false)) {
836 /* XXX should send SRP_I_LOGOUT request */
838 for (i = 0; i < target->ch_count; i++) {
840 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
841 shost_printk(KERN_DEBUG, target->scsi_host,
842 PFX "Sending CM DREQ failed\n");
848 static void srp_free_req_data(struct srp_target_port *target,
849 struct srp_rdma_ch *ch)
851 struct srp_device *dev = target->srp_host->srp_dev;
852 struct ib_device *ibdev = dev->dev;
853 struct srp_request *req;
856 if (!ch->target || !ch->req_ring)
859 for (i = 0; i < target->req_ring_size; ++i) {
860 req = &ch->req_ring[i];
861 if (dev->use_fast_reg)
864 kfree(req->fmr_list);
865 kfree(req->map_page);
866 if (req->indirect_dma_addr) {
867 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
868 target->indirect_size,
871 kfree(req->indirect_desc);
878 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
880 struct srp_target_port *target = ch->target;
881 struct srp_device *srp_dev = target->srp_host->srp_dev;
882 struct ib_device *ibdev = srp_dev->dev;
883 struct srp_request *req;
886 int i, ret = -ENOMEM;
888 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
893 for (i = 0; i < target->req_ring_size; ++i) {
894 req = &ch->req_ring[i];
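/*
 * A single pointer array serves as either the fast registration descriptor
 * list or the FMR list, depending on the registration mode of the device.
 */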
895 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
899 if (srp_dev->use_fast_reg)
900 req->fr_list = mr_list;
902 req->fmr_list = mr_list;
903 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
904 sizeof(void *), GFP_KERNEL);
907 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
908 if (!req->indirect_desc)
911 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
912 target->indirect_size,
914 if (ib_dma_mapping_error(ibdev, dma_addr))
917 req->indirect_dma_addr = dma_addr;
926 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
927 * @shost: SCSI host whose attributes to remove from sysfs.
929 * Note: Any attributes defined in the host template that did not exist
930 * before invocation of this function will be ignored.
932 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
934 struct device_attribute **attr;
936 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
937 device_remove_file(&shost->shost_dev, *attr);
940 static void srp_remove_target(struct srp_target_port *target)
942 struct srp_rdma_ch *ch;
945 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
947 srp_del_scsi_host_attr(target->scsi_host);
948 srp_rport_get(target->rport);
949 srp_remove_host(target->scsi_host);
950 scsi_remove_host(target->scsi_host);
951 srp_stop_rport_timers(target->rport);
952 srp_disconnect_target(target);
953 for (i = 0; i < target->ch_count; i++) {
955 srp_free_ch_ib(target, ch);
957 cancel_work_sync(&target->tl_err_work);
958 srp_rport_put(target->rport);
959 for (i = 0; i < target->ch_count; i++) {
961 srp_free_req_data(target, ch);
966 spin_lock(&target->srp_host->target_lock);
967 list_del(&target->list);
968 spin_unlock(&target->srp_host->target_lock);
970 scsi_host_put(target->scsi_host);
973 static void srp_remove_work(struct work_struct *work)
975 struct srp_target_port *target =
976 container_of(work, struct srp_target_port, remove_work);
978 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
980 srp_remove_target(target);
983 static void srp_rport_delete(struct srp_rport *rport)
985 struct srp_target_port *target = rport->lld_data;
987 srp_queue_remove_work(target);
990 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
992 struct srp_target_port *target = ch->target;
995 WARN_ON_ONCE(!multich && target->connected);
997 target->qp_in_error = false;
999 ret = srp_lookup_path(ch);
1004 init_completion(&ch->done);
1005 ret = srp_send_req(ch, multich);
1008 ret = wait_for_completion_interruptible(&ch->done);
1013 * The CM event handling code will set status to
1014 * SRP_PORT_REDIRECT if we get a port redirect REJ
1015 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1016 * redirect REJ back.
1018 switch (ch->status) {
1020 srp_change_conn_state(target, true);
1023 case SRP_PORT_REDIRECT:
1024 ret = srp_lookup_path(ch);
1029 case SRP_DLID_REDIRECT:
1032 case SRP_STALE_CONN:
1033 shost_printk(KERN_ERR, target->scsi_host, PFX
1034 "giving up on stale connection\n");
1035 ch->status = -ECONNRESET;
1044 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1046 struct ib_send_wr *bad_wr;
1047 struct ib_send_wr wr = {
1048 .opcode = IB_WR_LOCAL_INV,
1049 .wr_id = LOCAL_INV_WR_ID_MASK,
1053 .ex.invalidate_rkey = rkey,
1056 return ib_post_send(ch->qp, &wr, &bad_wr);
1059 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1060 struct srp_rdma_ch *ch,
1061 struct srp_request *req)
1063 struct srp_target_port *target = ch->target;
1064 struct srp_device *dev = target->srp_host->srp_dev;
1065 struct ib_device *ibdev = dev->dev;
1068 if (!scsi_sglist(scmnd) ||
1069 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1070 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1073 if (dev->use_fast_reg) {
1074 struct srp_fr_desc **pfr;
1076 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1077 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1079 shost_printk(KERN_ERR, target->scsi_host, PFX
1080 "Queueing INV WR for rkey %#x failed (%d)\n",
1081 (*pfr)->mr->rkey, res);
1082 queue_work(system_long_wq,
1083 &target->tl_err_work);
1087 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1090 struct ib_pool_fmr **pfmr;
1092 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1093 ib_fmr_pool_unmap(*pfmr);
1096 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1097 scmnd->sc_data_direction);
1101 * srp_claim_req - Take ownership of the scmnd associated with a request.
1102 * @ch: SRP RDMA channel.
1103 * @req: SRP request.
1104 * @sdev: If not NULL, only take ownership for this SCSI device.
1105 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1106 * ownership of @req->scmnd if it equals @scmnd.
1109 * Either NULL or a pointer to the SCSI command the caller became owner of.
1111 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1112 struct srp_request *req,
1113 struct scsi_device *sdev,
1114 struct scsi_cmnd *scmnd)
1116 unsigned long flags;
1118 spin_lock_irqsave(&ch->lock, flags);
1120 (!sdev || req->scmnd->device == sdev) &&
1121 (!scmnd || req->scmnd == scmnd)) {
1127 spin_unlock_irqrestore(&ch->lock, flags);
1133 * srp_free_req() - Unmap data and add request to the free request list.
1134 * @ch: SRP RDMA channel.
1135 * @req: Request to be freed.
1136 * @scmnd: SCSI command associated with @req.
1137 * @req_lim_delta: Amount to be added to @target->req_lim.
1139 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1140 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1142 unsigned long flags;
1144 srp_unmap_data(scmnd, ch, req);
1146 spin_lock_irqsave(&ch->lock, flags);
1147 ch->req_lim += req_lim_delta;
1148 spin_unlock_irqrestore(&ch->lock, flags);
1151 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1152 struct scsi_device *sdev, int result)
1154 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1157 srp_free_req(ch, req, scmnd, 0);
1158 scmnd->result = result;
1159 scmnd->scsi_done(scmnd);
1163 static void srp_terminate_io(struct srp_rport *rport)
1165 struct srp_target_port *target = rport->lld_data;
1166 struct srp_rdma_ch *ch;
1167 struct Scsi_Host *shost = target->scsi_host;
1168 struct scsi_device *sdev;
1172 * Invoking srp_terminate_io() while srp_queuecommand() is running
1173 * is not safe. Hence the warning statement below.
1175 shost_for_each_device(sdev, shost)
1176 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1178 for (i = 0; i < target->ch_count; i++) {
1179 ch = &target->ch[i];
1181 for (j = 0; j < target->req_ring_size; ++j) {
1182 struct srp_request *req = &ch->req_ring[j];
1184 srp_finish_req(ch, req, NULL,
1185 DID_TRANSPORT_FAILFAST << 16);
1191 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1192 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1193 * srp_reset_device() or srp_reset_host() calls will occur while this function
1194 * is in progress. One way to ensure this is not to call this function
1195 * directly but to call srp_reconnect_rport() instead, since that function
1196 * serializes calls to this function via rport->mutex and also blocks
1197 * srp_queuecommand() calls before invoking this function.
1199 static int srp_rport_reconnect(struct srp_rport *rport)
1201 struct srp_target_port *target = rport->lld_data;
1202 struct srp_rdma_ch *ch;
1204 bool multich = false;
1206 srp_disconnect_target(target);
1208 if (target->state == SRP_TARGET_SCANNING)
1212 * Now get a new local CM ID so that we avoid confusing the target in
1213 * case things are really fouled up. Doing so also ensures that all CM
1214 * callbacks will have finished before a new QP is allocated.
1216 for (i = 0; i < target->ch_count; i++) {
1217 ch = &target->ch[i];
1220 ret += srp_new_cm_id(ch);
1222 for (i = 0; i < target->ch_count; i++) {
1223 ch = &target->ch[i];
1226 for (j = 0; j < target->req_ring_size; ++j) {
1227 struct srp_request *req = &ch->req_ring[j];
1229 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1232 for (i = 0; i < target->ch_count; i++) {
1233 ch = &target->ch[i];
1237 * Whether or not creating a new CM ID succeeded, create a new
1238 * QP. This guarantees that all completion callback function
1239 * invocations have finished before request resetting starts.
1241 ret += srp_create_ch_ib(ch);
1243 INIT_LIST_HEAD(&ch->free_tx);
1244 for (j = 0; j < target->queue_size; ++j)
1245 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1247 for (i = 0; i < target->ch_count; i++) {
1248 ch = &target->ch[i];
1249 if (ret || !ch->target) {
1254 ret = srp_connect_ch(ch, multich);
1259 shost_printk(KERN_INFO, target->scsi_host,
1260 PFX "reconnect succeeded\n");
1265 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1266 unsigned int dma_len, u32 rkey)
1268 struct srp_direct_buf *desc = state->desc;
1270 desc->va = cpu_to_be64(dma_addr);
1271 desc->key = cpu_to_be32(rkey);
1272 desc->len = cpu_to_be32(dma_len);
1274 state->total_len += dma_len;
1279 static int srp_map_finish_fmr(struct srp_map_state *state,
1280 struct srp_rdma_ch *ch)
1282 struct ib_pool_fmr *fmr;
1285 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1286 state->npages, io_addr);
1288 return PTR_ERR(fmr);
1290 *state->next_fmr++ = fmr;
1293 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
1298 static int srp_map_finish_fr(struct srp_map_state *state,
1299 struct srp_rdma_ch *ch)
1301 struct srp_target_port *target = ch->target;
1302 struct srp_device *dev = target->srp_host->srp_dev;
1303 struct ib_send_wr *bad_wr;
1304 struct ib_send_wr wr;
1305 struct srp_fr_desc *desc;
1308 desc = srp_fr_pool_get(ch->fr_pool);
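/*
 * Bump the key portion of the rkey so that stale requests referring to the
 * previous registration of this MR can no longer match it.
 */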
1312 rkey = ib_inc_rkey(desc->mr->rkey);
1313 ib_update_fast_reg_key(desc->mr, rkey);
1315 memcpy(desc->frpl->page_list, state->pages,
1316 sizeof(state->pages[0]) * state->npages);
1318 memset(&wr, 0, sizeof(wr));
1319 wr.opcode = IB_WR_FAST_REG_MR;
1320 wr.wr_id = FAST_REG_WR_ID_MASK;
1321 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1322 wr.wr.fast_reg.page_list = desc->frpl;
1323 wr.wr.fast_reg.page_list_len = state->npages;
1324 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1325 wr.wr.fast_reg.length = state->dma_len;
1326 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1327 IB_ACCESS_REMOTE_READ |
1328 IB_ACCESS_REMOTE_WRITE);
1329 wr.wr.fast_reg.rkey = desc->mr->lkey;
1331 *state->next_fr++ = desc;
1334 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1337 return ib_post_send(ch->qp, &wr, &bad_wr);
1340 static int srp_finish_mapping(struct srp_map_state *state,
1341 struct srp_rdma_ch *ch)
1343 struct srp_target_port *target = ch->target;
1346 if (state->npages == 0)
1349 if (state->npages == 1 && !register_always)
1350 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1353 ret = target->srp_host->srp_dev->use_fast_reg ?
1354 srp_map_finish_fr(state, ch) :
1355 srp_map_finish_fmr(state, ch);
1365 static void srp_map_update_start(struct srp_map_state *state,
1366 struct scatterlist *sg, int sg_index,
1367 dma_addr_t dma_addr)
1369 state->unmapped_sg = sg;
1370 state->unmapped_index = sg_index;
1371 state->unmapped_addr = dma_addr;
1374 static int srp_map_sg_entry(struct srp_map_state *state,
1375 struct srp_rdma_ch *ch,
1376 struct scatterlist *sg, int sg_index,
1379 struct srp_target_port *target = ch->target;
1380 struct srp_device *dev = target->srp_host->srp_dev;
1381 struct ib_device *ibdev = dev->dev;
1382 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1383 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1392 * Once we're in direct map mode for a request, we don't
1393 * go back to FMR or FR mode, so no need to update anything
1394 * other than the descriptor.
1396 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1401 * Since not all RDMA HW drivers support non-zero page offsets for
1402 * FMR, if we start at an offset into a page, don't merge into the
1403 * current FMR mapping. Finish it out, and use the kernel's MR for
1406 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1407 dma_len > dev->mr_max_size) {
1408 ret = srp_finish_mapping(state, ch);
1412 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1413 srp_map_update_start(state, NULL, 0, 0);
1418 * If this is the first sg that will be mapped via FMR or via FR, save
1419 * our position. We need to know the first unmapped entry, its index,
1420 * and the first unmapped address within that entry to be able to
1421 * restart mapping after an error.
1423 if (!state->unmapped_sg)
1424 srp_map_update_start(state, sg, sg_index, dma_addr);
1427 unsigned offset = dma_addr & ~dev->mr_page_mask;
1428 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1429 ret = srp_finish_mapping(state, ch);
1433 srp_map_update_start(state, sg, sg_index, dma_addr);
1436 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1439 state->base_dma_addr = dma_addr;
1440 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1441 state->dma_len += len;
1447 * If the last entry of the MR wasn't a full page, then we need to
1448 * close it out and start a new one -- we can only merge at page
1452 if (len != dev->mr_page_size) {
1453 ret = srp_finish_mapping(state, ch);
1455 srp_map_update_start(state, NULL, 0, 0);
1460 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1461 struct srp_request *req, struct scatterlist *scat,
1464 struct srp_target_port *target = ch->target;
1465 struct srp_device *dev = target->srp_host->srp_dev;
1466 struct ib_device *ibdev = dev->dev;
1467 struct scatterlist *sg;
1471 state->desc = req->indirect_desc;
1472 state->pages = req->map_page;
1473 if (dev->use_fast_reg) {
1474 state->next_fr = req->fr_list;
1475 use_mr = !!ch->fr_pool;
1477 state->next_fmr = req->fmr_list;
1478 use_mr = !!ch->fmr_pool;
1481 for_each_sg(scat, sg, count, i) {
1482 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1484 * Memory registration failed, so backtrack to the
1485 * first unmapped entry and continue on without using
1486 * memory registration.
1488 dma_addr_t dma_addr;
1489 unsigned int dma_len;
1492 sg = state->unmapped_sg;
1493 i = state->unmapped_index;
1495 dma_addr = ib_sg_dma_address(ibdev, sg);
1496 dma_len = ib_sg_dma_len(ibdev, sg);
1497 dma_len -= (state->unmapped_addr - dma_addr);
1498 dma_addr = state->unmapped_addr;
1500 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1504 if (use_mr && srp_finish_mapping(state, ch))
1507 req->nmdesc = state->nmdesc;
1512 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1513 struct srp_request *req)
1515 struct srp_target_port *target = ch->target;
1516 struct scatterlist *scat;
1517 struct srp_cmd *cmd = req->cmd->buf;
1518 int len, nents, count;
1519 struct srp_device *dev;
1520 struct ib_device *ibdev;
1521 struct srp_map_state state;
1522 struct srp_indirect_buf *indirect_hdr;
1526 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1527 return sizeof (struct srp_cmd);
1529 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1530 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1531 shost_printk(KERN_WARNING, target->scsi_host,
1532 PFX "Unhandled data direction %d\n",
1533 scmnd->sc_data_direction);
1537 nents = scsi_sg_count(scmnd);
1538 scat = scsi_sglist(scmnd);
1540 dev = target->srp_host->srp_dev;
1543 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1544 if (unlikely(count == 0))
1547 fmt = SRP_DATA_DESC_DIRECT;
1548 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1550 if (count == 1 && !register_always) {
1552 * The midlayer only generated a single gather/scatter
1553 * entry, or DMA mapping coalesced everything to a
1554 * single entry. So a direct descriptor along with
1555 * the DMA MR suffices.
1557 struct srp_direct_buf *buf = (void *) cmd->add_data;
1559 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1560 buf->key = cpu_to_be32(target->rkey);
1561 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1568 * We have more than one scatter/gather entry, so build our indirect
1569 * descriptor table, trying to merge as many entries as we can.
1571 indirect_hdr = (void *) cmd->add_data;
1573 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1574 target->indirect_size, DMA_TO_DEVICE);
1576 memset(&state, 0, sizeof(state));
1577 srp_map_sg(&state, ch, req, scat, count);
1579 /* We've mapped the request, now pull as much of the indirect
1580 * descriptor table as we can into the command buffer. If this
1581 * target is not using an external indirect table, we are
1582 * guaranteed to fit into the command, as the SCSI layer won't
1583 * give us more S/G entries than we allow.
1585 if (state.ndesc == 1) {
1587 * Memory registration collapsed the sg-list into one entry,
1588 * so use a direct descriptor.
1590 struct srp_direct_buf *buf = (void *) cmd->add_data;
1592 *buf = req->indirect_desc[0];
1596 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1597 !target->allow_ext_sg)) {
1598 shost_printk(KERN_ERR, target->scsi_host,
1599 "Could not fit S/G list into SRP_CMD\n");
1603 count = min(state.ndesc, target->cmd_sg_cnt);
1604 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1606 fmt = SRP_DATA_DESC_INDIRECT;
1607 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1608 len += count * sizeof (struct srp_direct_buf);
1610 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1611 count * sizeof (struct srp_direct_buf));
1613 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1614 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1615 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1616 indirect_hdr->len = cpu_to_be32(state.total_len);
1618 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1619 cmd->data_out_desc_cnt = count;
1621 cmd->data_in_desc_cnt = count;
1623 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1627 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1628 cmd->buf_fmt = fmt << 4;
1636 * Return an IU and a possible credit to the free pool
1638 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1639 enum srp_iu_type iu_type)
1641 unsigned long flags;
1643 spin_lock_irqsave(&ch->lock, flags);
1644 list_add(&iu->list, &ch->free_tx);
1645 if (iu_type != SRP_IU_RSP)
1647 spin_unlock_irqrestore(&ch->lock, flags);
1651 * Must be called with ch->lock held to protect req_lim and free_tx.
1652 * If IU is not sent, it must be returned using srp_put_tx_iu().
1655 * An upper limit for the number of allocated information units for each
1657 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1658 * more than Scsi_Host.can_queue requests.
1659 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1660 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1661 * one unanswered SRP request to an initiator.
1663 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1664 enum srp_iu_type iu_type)
1666 struct srp_target_port *target = ch->target;
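/*
 * Reserve SRP_TSK_MGMT_SQ_SIZE request-limit credits for task management
 * IUs so that regular SCSI commands cannot starve them.
 */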
1667 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1670 srp_send_completion(ch->send_cq, ch);
1672 if (list_empty(&ch->free_tx))
1675 /* Initiator responses to target requests do not consume credits */
1676 if (iu_type != SRP_IU_RSP) {
1677 if (ch->req_lim <= rsv) {
1678 ++target->zero_req_lim;
1685 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1686 list_del(&iu->list);
1690 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1692 struct srp_target_port *target = ch->target;
1694 struct ib_send_wr wr, *bad_wr;
1696 list.addr = iu->dma;
1698 list.lkey = target->lkey;
1701 wr.wr_id = (uintptr_t) iu;
1704 wr.opcode = IB_WR_SEND;
1705 wr.send_flags = IB_SEND_SIGNALED;
1707 return ib_post_send(ch->qp, &wr, &bad_wr);
1710 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1712 struct srp_target_port *target = ch->target;
1713 struct ib_recv_wr wr, *bad_wr;
1716 list.addr = iu->dma;
1717 list.length = iu->size;
1718 list.lkey = target->lkey;
1721 wr.wr_id = (uintptr_t) iu;
1725 return ib_post_recv(ch->qp, &wr, &bad_wr);
1728 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1730 struct srp_target_port *target = ch->target;
1731 struct srp_request *req;
1732 struct scsi_cmnd *scmnd;
1733 unsigned long flags;
1735 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1736 spin_lock_irqsave(&ch->lock, flags);
1737 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1738 spin_unlock_irqrestore(&ch->lock, flags);
1740 ch->tsk_mgmt_status = -1;
1741 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1742 ch->tsk_mgmt_status = rsp->data[3];
1743 complete(&ch->tsk_mgmt_done);
1745 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1747 req = (void *)scmnd->host_scribble;
1748 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1751 shost_printk(KERN_ERR, target->scsi_host,
1752 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1753 rsp->tag, ch - target->ch, ch->qp->qp_num);
1755 spin_lock_irqsave(&ch->lock, flags);
1756 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1757 spin_unlock_irqrestore(&ch->lock, flags);
1761 scmnd->result = rsp->status;
1763 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1764 memcpy(scmnd->sense_buffer, rsp->data +
1765 be32_to_cpu(rsp->resp_data_len),
1766 min_t(int, be32_to_cpu(rsp->sense_data_len),
1767 SCSI_SENSE_BUFFERSIZE));
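/*
 * Propagate residual counts: UNDER flags report how many requested bytes
 * were not transferred, OVER flags are reported as a negative residual.
 */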
1770 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1771 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1772 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1773 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1775 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1776 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1777 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1779 srp_free_req(ch, req, scmnd,
1780 be32_to_cpu(rsp->req_lim_delta));
1782 scmnd->host_scribble = NULL;
1783 scmnd->scsi_done(scmnd);
1787 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1790 struct srp_target_port *target = ch->target;
1791 struct ib_device *dev = target->srp_host->srp_dev->dev;
1792 unsigned long flags;
1796 spin_lock_irqsave(&ch->lock, flags);
1797 ch->req_lim += req_delta;
1798 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1799 spin_unlock_irqrestore(&ch->lock, flags);
1802 shost_printk(KERN_ERR, target->scsi_host, PFX
1803 "no IU available to send response\n");
1807 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1808 memcpy(iu->buf, rsp, len);
1809 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1811 err = srp_post_send(ch, iu, len);
1813 shost_printk(KERN_ERR, target->scsi_host, PFX
1814 "unable to post response: %d\n", err);
1815 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1821 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1822 struct srp_cred_req *req)
1824 struct srp_cred_rsp rsp = {
1825 .opcode = SRP_CRED_RSP,
1828 s32 delta = be32_to_cpu(req->req_lim_delta);
1830 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1831 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1832 "problems processing SRP_CRED_REQ\n");
1835 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1836 struct srp_aer_req *req)
1838 struct srp_target_port *target = ch->target;
1839 struct srp_aer_rsp rsp = {
1840 .opcode = SRP_AER_RSP,
1843 s32 delta = be32_to_cpu(req->req_lim_delta);
1845 shost_printk(KERN_ERR, target->scsi_host, PFX
1846 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1848 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1849 shost_printk(KERN_ERR, target->scsi_host, PFX
1850 "problems processing SRP_AER_REQ\n");
1853 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1855 struct srp_target_port *target = ch->target;
1856 struct ib_device *dev = target->srp_host->srp_dev->dev;
1857 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1861 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1864 opcode = *(u8 *) iu->buf;
1867 shost_printk(KERN_ERR, target->scsi_host,
1868 PFX "recv completion, opcode 0x%02x\n", opcode);
1869 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1870 iu->buf, wc->byte_len, true);
1875 srp_process_rsp(ch, iu->buf);
1879 srp_process_cred_req(ch, iu->buf);
1883 srp_process_aer_req(ch, iu->buf);
1887 /* XXX Handle target logout */
1888 shost_printk(KERN_WARNING, target->scsi_host,
1889 PFX "Got target logout request\n");
1893 shost_printk(KERN_WARNING, target->scsi_host,
1894 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1898 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1901 res = srp_post_recv(ch, iu);
1903 shost_printk(KERN_ERR, target->scsi_host,
1904 PFX "Recv failed with error code %d\n", res);
1908 * srp_tl_err_work() - handle a transport layer error
1909 * @work: Work structure embedded in an SRP target port.
1911 * Note: This function may get invoked before the rport has been created,
1912 * hence the target->rport test.
1914 static void srp_tl_err_work(struct work_struct *work)
1916 struct srp_target_port *target;
1918 target = container_of(work, struct srp_target_port, tl_err_work);
1920 srp_start_tl_fail_timers(target->rport);
1923 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1924 bool send_err, struct srp_rdma_ch *ch)
1926 struct srp_target_port *target = ch->target;
1928 if (wr_id == SRP_LAST_WR_ID) {
1929 complete(&ch->done);
1933 if (target->connected && !target->qp_in_error) {
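/*
 * Bits in wr_id distinguish memory registration work requests (LOCAL_INV,
 * FAST_REG_MR) from regular IU sends and receives.
 */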
1934 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1935 shost_printk(KERN_ERR, target->scsi_host, PFX
1936 "LOCAL_INV failed with status %s (%d)\n",
1937 ib_wc_status_msg(wc_status), wc_status);
1938 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1939 shost_printk(KERN_ERR, target->scsi_host, PFX
1940 "FAST_REG_MR failed status %s (%d)\n",
1941 ib_wc_status_msg(wc_status), wc_status);
1943 shost_printk(KERN_ERR, target->scsi_host,
1944 PFX "failed %s status %s (%d) for iu %p\n",
1945 send_err ? "send" : "receive",
1946 ib_wc_status_msg(wc_status), wc_status,
1947 (void *)(uintptr_t)wr_id);
1949 queue_work(system_long_wq, &target->tl_err_work);
1951 target->qp_in_error = true;
1954 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1956 struct srp_rdma_ch *ch = ch_ptr;
1959 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1960 while (ib_poll_cq(cq, 1, &wc) > 0) {
1961 if (likely(wc.status == IB_WC_SUCCESS)) {
1962 srp_handle_recv(ch, &wc);
1964 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1969 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1971 struct srp_rdma_ch *ch = ch_ptr;
1975 while (ib_poll_cq(cq, 1, &wc) > 0) {
1976 if (likely(wc.status == IB_WC_SUCCESS)) {
1977 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1978 list_add(&iu->list, &ch->free_tx);
1980 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1985 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1987 struct srp_target_port *target = host_to_target(shost);
1988 struct srp_rport *rport = target->rport;
1989 struct srp_rdma_ch *ch;
1990 struct srp_request *req;
1992 struct srp_cmd *cmd;
1993 struct ib_device *dev;
1994 unsigned long flags;
1998 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2001 * The SCSI EH thread is the only context from which srp_queuecommand()
2002 * can get invoked for blocked devices (SDEV_BLOCK /
2003 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2004 * locking the rport mutex if invoked from inside the SCSI EH.
2007 mutex_lock(&rport->mutex);
2009 scmnd->result = srp_chkready(target->rport);
2010 if (unlikely(scmnd->result))
2013 WARN_ON_ONCE(scmnd->request->tag < 0);
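/*
 * The blk-mq unique tag encodes both the hardware queue index, which selects
 * the RDMA channel, and the per-channel request slot index.
 */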
2014 tag = blk_mq_unique_tag(scmnd->request);
2015 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2016 idx = blk_mq_unique_tag_to_tag(tag);
2017 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2018 dev_name(&shost->shost_gendev), tag, idx,
2019 target->req_ring_size);
2021 spin_lock_irqsave(&ch->lock, flags);
2022 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2023 spin_unlock_irqrestore(&ch->lock, flags);
2028 req = &ch->req_ring[idx];
2029 dev = target->srp_host->srp_dev->dev;
2030 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2033 scmnd->host_scribble = (void *) req;
2036 memset(cmd, 0, sizeof *cmd);
2038 cmd->opcode = SRP_CMD;
2039 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
2041 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2046 len = srp_map_data(scmnd, ch, req);
2048 shost_printk(KERN_ERR, target->scsi_host,
2049 PFX "Failed to map data (%d)\n", len);
2051 * If we ran out of memory descriptors (-ENOMEM) because an
2052 * application is queuing many requests with more than
2053 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2054 * to reduce queue depth temporarily.
2056 scmnd->result = len == -ENOMEM ?
2057 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2061 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2064 if (srp_post_send(ch, iu, len)) {
2065 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2073 mutex_unlock(&rport->mutex);
2078 srp_unmap_data(scmnd, ch, req);
2081 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2084 * Prevent the loops that iterate over the request ring from
2085 * encountering a dangling SCSI command pointer.
2090 if (scmnd->result) {
2091 scmnd->scsi_done(scmnd);
2094 ret = SCSI_MLQUEUE_HOST_BUSY;
2101 * Note: the resources allocated in this function are freed in
2104 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2106 struct srp_target_port *target = ch->target;
2109 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2113 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2118 for (i = 0; i < target->queue_size; ++i) {
2119 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2121 GFP_KERNEL, DMA_FROM_DEVICE);
2122 if (!ch->rx_ring[i])
2126 for (i = 0; i < target->queue_size; ++i) {
2127 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2129 GFP_KERNEL, DMA_TO_DEVICE);
2130 if (!ch->tx_ring[i])
2133 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2139 for (i = 0; i < target->queue_size; ++i) {
2140 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2141 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2154 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2156 uint64_t T_tr_ns, max_compl_time_ms;
2157 uint32_t rq_tmo_jiffies;
2160 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2161 * table 91), both the QP timeout and the retry count have to be set
2162 * for RC QP's during the RTR to RTS transition.
2164 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2165 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2168 * Set target->rq_tmo_jiffies to one second more than the largest time
2169 * it can take before an error completion is generated. See also
2170 * C9-140..142 in the IBTA spec for more information about how to
2171 * convert the QP Local ACK Timeout value to nanoseconds.
2173 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2174 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2175 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2176 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
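/*
 * Illustrative example (the values below are arbitrary): with a Local ACK
 * Timeout of 19 and retry_cnt == 7, T_tr_ns = 4096 * 2^19 ns ~= 2.15 s, so
 * max_compl_time_ms ~= 7 * 4 * 2150 ms ~= 60 s and rq_tmo_jiffies ends up
 * at roughly 61 seconds.
 */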
2178 return rq_tmo_jiffies;
2181 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2182 struct srp_login_rsp *lrsp,
2183 struct srp_rdma_ch *ch)
2185 struct srp_target_port *target = ch->target;
2186 struct ib_qp_attr *qp_attr = NULL;
2191 if (lrsp->opcode == SRP_LOGIN_RSP) {
2192 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2193 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2196 * Reserve credits for task management so we don't
2197 * bounce requests back to the SCSI mid-layer.
2199 target->scsi_host->can_queue
2200 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2201 target->scsi_host->can_queue);
2202 target->scsi_host->cmd_per_lun
2203 = min_t(int, target->scsi_host->can_queue,
2204 target->scsi_host->cmd_per_lun);
2206 shost_printk(KERN_WARNING, target->scsi_host,
2207 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2213 ret = srp_alloc_iu_bufs(ch);
2219 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2223 qp_attr->qp_state = IB_QPS_RTR;
2224 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2228 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2232 for (i = 0; i < target->queue_size; i++) {
2233 struct srp_iu *iu = ch->rx_ring[i];
2235 ret = srp_post_recv(ch, iu);
2240 qp_attr->qp_state = IB_QPS_RTS;
2241 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2245 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2247 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2251 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2260 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2261 struct ib_cm_event *event,
2262 struct srp_rdma_ch *ch)
2264 struct srp_target_port *target = ch->target;
2265 struct Scsi_Host *shost = target->scsi_host;
2266 struct ib_class_port_info *cpi;
2269 switch (event->param.rej_rcvd.reason) {
2270 case IB_CM_REJ_PORT_CM_REDIRECT:
2271 cpi = event->param.rej_rcvd.ari;
2272 ch->path.dlid = cpi->redirect_lid;
2273 ch->path.pkey = cpi->redirect_pkey;
2274 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2275 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2277 ch->status = ch->path.dlid ?
2278 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2281 case IB_CM_REJ_PORT_REDIRECT:
2282 if (srp_target_is_topspin(target)) {
2284 * Topspin/Cisco SRP gateways incorrectly send
2285 * reject reason code 25 when they mean 24
2288 memcpy(ch->path.dgid.raw,
2289 event->param.rej_rcvd.ari, 16);
2291 shost_printk(KERN_DEBUG, shost,
2292 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2293 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2294 be64_to_cpu(ch->path.dgid.global.interface_id));
2296 ch->status = SRP_PORT_REDIRECT;
2298 shost_printk(KERN_WARNING, shost,
2299 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2300 ch->status = -ECONNRESET;
2304 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2305 shost_printk(KERN_WARNING, shost,
2306 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2307 ch->status = -ECONNRESET;
2310 case IB_CM_REJ_CONSUMER_DEFINED:
2311 opcode = *(u8 *) event->private_data;
2312 if (opcode == SRP_LOGIN_REJ) {
2313 struct srp_login_rej *rej = event->private_data;
2314 u32 reason = be32_to_cpu(rej->reason);
2316 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2317 shost_printk(KERN_WARNING, shost,
2318 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2320 shost_printk(KERN_WARNING, shost, PFX
2321 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2323 target->orig_dgid.raw, reason);
2325 shost_printk(KERN_WARNING, shost,
2326 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2327 " opcode 0x%02x\n", opcode);
2328 ch->status = -ECONNRESET;
2331 case IB_CM_REJ_STALE_CONN:
2332 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2333 ch->status = SRP_STALE_CONN;
2337 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2338 event->param.rej_rcvd.reason);
2339 ch->status = -ECONNRESET;
2343 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2345 struct srp_rdma_ch *ch = cm_id->context;
2346 struct srp_target_port *target = ch->target;
2349 switch (event->event) {
2350 case IB_CM_REQ_ERROR:
2351 shost_printk(KERN_DEBUG, target->scsi_host,
2352 PFX "Sending CM REQ failed\n");
2354 ch->status = -ECONNRESET;
2357 case IB_CM_REP_RECEIVED:
2359 srp_cm_rep_handler(cm_id, event->private_data, ch);
2362 case IB_CM_REJ_RECEIVED:
2363 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2366 srp_cm_rej_handler(cm_id, event, ch);
2369 case IB_CM_DREQ_RECEIVED:
2370 shost_printk(KERN_WARNING, target->scsi_host,
2371 PFX "DREQ received - connection closed\n");
2372 srp_change_conn_state(target, false);
2373 if (ib_send_cm_drep(cm_id, NULL, 0))
2374 shost_printk(KERN_ERR, target->scsi_host,
2375 PFX "Sending CM DREP failed\n");
2376 queue_work(system_long_wq, &target->tl_err_work);
2379 case IB_CM_TIMEWAIT_EXIT:
2380 shost_printk(KERN_ERR, target->scsi_host,
2381 PFX "connection closed\n");
2387 case IB_CM_MRA_RECEIVED:
2388 case IB_CM_DREQ_ERROR:
2389 case IB_CM_DREP_RECEIVED:
2393 shost_printk(KERN_WARNING, target->scsi_host,
2394 PFX "Unhandled CM event %d\n", event->event);
2399 complete(&ch->done);
* srp_change_queue_depth - set the queue depth of a SCSI device
2406 * @sdev: scsi device struct
2407 * @qdepth: requested queue depth
2409 * Returns queue depth.
2412 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
if (!sdev->tagged_supported)
	qdepth = 1;

return scsi_change_queue_depth(sdev, qdepth);
2419 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2420 unsigned int lun, u8 func)
2422 struct srp_target_port *target = ch->target;
2423 struct srp_rport *rport = target->rport;
2424 struct ib_device *dev = target->srp_host->srp_dev->dev;
2426 struct srp_tsk_mgmt *tsk_mgmt;
2428 if (!target->connected || target->qp_in_error)
2431 init_completion(&ch->tsk_mgmt_done);
2434 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2435 * invoked while a task management function is being sent.
2437 mutex_lock(&rport->mutex);
2438 spin_lock_irq(&ch->lock);
2439 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2440 spin_unlock_irq(&ch->lock);
2443 mutex_unlock(&rport->mutex);
2448 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2451 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2453 tsk_mgmt->opcode = SRP_TSK_MGMT;
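/*
 * Editor's note (assumption): SRP carries an 8-byte SCSI LUN; with the
 * peripheral-device addressing method used for small LUN numbers the LUN
 * ends up in the second byte, which is why the value is shifted into the
 * top 16 bits before the big-endian conversion below.
 */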
2454 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2455 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2456 tsk_mgmt->tsk_mgmt_func = func;
2457 tsk_mgmt->task_tag = req_tag;
2459 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2461 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2462 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2463 mutex_unlock(&rport->mutex);
2467 mutex_unlock(&rport->mutex);
2469 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2470 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2476 static int srp_abort(struct scsi_cmnd *scmnd)
2478 struct srp_target_port *target = host_to_target(scmnd->device->host);
2479 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2482 struct srp_rdma_ch *ch;
2485 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2489 tag = blk_mq_unique_tag(scmnd->request);
2490 ch_idx = blk_mq_unique_tag_to_hwq(tag);
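/*
 * Editor's note (assumption): blk_mq_unique_tag() packs the hardware
 * queue index into the upper bits of the returned tag, so
 * blk_mq_unique_tag_to_hwq() recovers the RDMA channel on which
 * srp_queuecommand() originally submitted this command.
 */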
2491 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2493 ch = &target->ch[ch_idx];
2494 if (!srp_claim_req(ch, req, NULL, scmnd))
2496 shost_printk(KERN_ERR, target->scsi_host,
2497 "Sending SRP abort for tag %#x\n", tag);
2498 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2499 SRP_TSK_ABORT_TASK) == 0)
2501 else if (target->rport->state == SRP_RPORT_LOST)
2505 srp_free_req(ch, req, scmnd, 0);
2506 scmnd->result = DID_ABORT << 16;
2507 scmnd->scsi_done(scmnd);
2512 static int srp_reset_device(struct scsi_cmnd *scmnd)
2514 struct srp_target_port *target = host_to_target(scmnd->device->host);
2515 struct srp_rdma_ch *ch;
2518 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2520 ch = &target->ch[0];
2521 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2524 if (ch->tsk_mgmt_status)
2527 for (i = 0; i < target->ch_count; i++) {
2528 ch = &target->ch[i];
for (j = 0; j < target->req_ring_size; ++j) {
	struct srp_request *req = &ch->req_ring[j];
2532 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2539 static int srp_reset_host(struct scsi_cmnd *scmnd)
2541 struct srp_target_port *target = host_to_target(scmnd->device->host);
2543 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2545 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2548 static int srp_slave_configure(struct scsi_device *sdev)
2550 struct Scsi_Host *shost = sdev->host;
2551 struct srp_target_port *target = host_to_target(shost);
2552 struct request_queue *q = sdev->request_queue;
2553 unsigned long timeout;
2555 if (sdev->type == TYPE_DISK) {
2556 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2557 blk_queue_rq_timeout(q, timeout);
2563 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2566 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2568 return sprintf(buf, "0x%016llx\n",
2569 (unsigned long long) be64_to_cpu(target->id_ext));
2572 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2575 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2577 return sprintf(buf, "0x%016llx\n",
2578 (unsigned long long) be64_to_cpu(target->ioc_guid));
2581 static ssize_t show_service_id(struct device *dev,
2582 struct device_attribute *attr, char *buf)
2584 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2586 return sprintf(buf, "0x%016llx\n",
2587 (unsigned long long) be64_to_cpu(target->service_id));
2590 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2593 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2595 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2598 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2603 return sprintf(buf, "%pI6\n", target->sgid.raw);
2606 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2609 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2610 struct srp_rdma_ch *ch = &target->ch[0];
2612 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2615 static ssize_t show_orig_dgid(struct device *dev,
2616 struct device_attribute *attr, char *buf)
2618 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2620 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2623 static ssize_t show_req_lim(struct device *dev,
2624 struct device_attribute *attr, char *buf)
2626 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2627 struct srp_rdma_ch *ch;
2628 int i, req_lim = INT_MAX;
2630 for (i = 0; i < target->ch_count; i++) {
2631 ch = &target->ch[i];
2632 req_lim = min(req_lim, ch->req_lim);
2634 return sprintf(buf, "%d\n", req_lim);
2637 static ssize_t show_zero_req_lim(struct device *dev,
2638 struct device_attribute *attr, char *buf)
2640 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2642 return sprintf(buf, "%d\n", target->zero_req_lim);
2645 static ssize_t show_local_ib_port(struct device *dev,
2646 struct device_attribute *attr, char *buf)
2648 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2650 return sprintf(buf, "%d\n", target->srp_host->port);
2653 static ssize_t show_local_ib_device(struct device *dev,
2654 struct device_attribute *attr, char *buf)
2656 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2658 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2661 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2664 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2666 return sprintf(buf, "%d\n", target->ch_count);
2669 static ssize_t show_comp_vector(struct device *dev,
2670 struct device_attribute *attr, char *buf)
2672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2674 return sprintf(buf, "%d\n", target->comp_vector);
2677 static ssize_t show_tl_retry_count(struct device *dev,
2678 struct device_attribute *attr, char *buf)
2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2682 return sprintf(buf, "%d\n", target->tl_retry_count);
2685 static ssize_t show_cmd_sg_entries(struct device *dev,
2686 struct device_attribute *attr, char *buf)
2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2690 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2693 static ssize_t show_allow_ext_sg(struct device *dev,
2694 struct device_attribute *attr, char *buf)
2696 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2698 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2701 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2702 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2703 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2704 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2705 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2706 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2707 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2708 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2709 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2710 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2711 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2712 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2713 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2714 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2715 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2716 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
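/*
 * Editor's sketch (not part of the driver): the attributes declared above
 * surface under the SCSI host's sysfs directory. A minimal userspace
 * reader could look like the following; the host number and attribute
 * name are examples only.
 */
#if 0
#include <stdio.h>

static int read_srp_host_attr(void)
{
	/* Hypothetical path: host7 stands in for whatever host was created. */
	FILE *f = fopen("/sys/class/scsi_host/host7/req_lim", "r");
	char buf[32];

	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("req_lim: %s", buf);
	return 0;
}
#endif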
2718 static struct device_attribute *srp_host_attrs[] = {
2721 &dev_attr_service_id,
2725 &dev_attr_orig_dgid,
2727 &dev_attr_zero_req_lim,
2728 &dev_attr_local_ib_port,
2729 &dev_attr_local_ib_device,
2731 &dev_attr_comp_vector,
2732 &dev_attr_tl_retry_count,
2733 &dev_attr_cmd_sg_entries,
2734 &dev_attr_allow_ext_sg,
2738 static struct scsi_host_template srp_template = {
2739 .module = THIS_MODULE,
2740 .name = "InfiniBand SRP initiator",
2741 .proc_name = DRV_NAME,
2742 .slave_configure = srp_slave_configure,
2743 .info = srp_target_info,
2744 .queuecommand = srp_queuecommand,
2745 .change_queue_depth = srp_change_queue_depth,
2746 .eh_abort_handler = srp_abort,
2747 .eh_device_reset_handler = srp_reset_device,
2748 .eh_host_reset_handler = srp_reset_host,
2749 .skip_settle_delay = true,
2750 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2751 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2753 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2754 .use_clustering = ENABLE_CLUSTERING,
2755 .shost_attrs = srp_host_attrs,
2757 .track_queue_depth = 1,
2760 static int srp_sdev_count(struct Scsi_Host *host)
2762 struct scsi_device *sdev;
2765 shost_for_each_device(sdev, host)
2771 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2773 struct srp_rport_identifiers ids;
2774 struct srp_rport *rport;
2776 target->state = SRP_TARGET_SCANNING;
2777 sprintf(target->target_name, "SRP.T10:%016llX",
2778 (unsigned long long) be64_to_cpu(target->id_ext));
2780 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2783 memcpy(ids.port_id, &target->id_ext, 8);
2784 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2785 ids.roles = SRP_RPORT_ROLE_TARGET;
2786 rport = srp_rport_add(target->scsi_host, &ids);
2787 if (IS_ERR(rport)) {
2788 scsi_remove_host(target->scsi_host);
2789 return PTR_ERR(rport);
2792 rport->lld_data = target;
2793 target->rport = rport;
2795 spin_lock(&host->target_lock);
2796 list_add_tail(&target->list, &host->target_list);
2797 spin_unlock(&host->target_lock);
2799 scsi_scan_target(&target->scsi_host->shost_gendev,
2800 0, target->scsi_id, SCAN_WILD_CARD, 0);
2802 if (!target->connected || target->qp_in_error) {
2803 shost_printk(KERN_INFO, target->scsi_host,
2804 PFX "SCSI scan failed - removing SCSI host\n");
2805 srp_queue_remove_work(target);
2809 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2810 dev_name(&target->scsi_host->shost_gendev),
2811 srp_sdev_count(target->scsi_host));
2813 spin_lock_irq(&target->lock);
2814 if (target->state == SRP_TARGET_SCANNING)
2815 target->state = SRP_TARGET_LIVE;
2816 spin_unlock_irq(&target->lock);
2822 static void srp_release_dev(struct device *dev)
2824 struct srp_host *host =
2825 container_of(dev, struct srp_host, dev);
2827 complete(&host->released);
2830 static struct class srp_class = {
2831 .name = "infiniband_srp",
2832 .dev_release = srp_release_dev
2836 * srp_conn_unique() - check whether the connection to a target is unique
2838 * @target: SRP target port.
2840 static bool srp_conn_unique(struct srp_host *host,
2841 struct srp_target_port *target)
2843 struct srp_target_port *t;
2846 if (target->state == SRP_TARGET_REMOVED)
2851 spin_lock(&host->target_lock);
2852 list_for_each_entry(t, &host->target_list, list) {
2854 target->id_ext == t->id_ext &&
2855 target->ioc_guid == t->ioc_guid &&
2856 target->initiator_ext == t->initiator_ext) {
2861 spin_unlock(&host->target_lock);
2868 * Target ports are added by writing
2870 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2871 * pkey=<P_Key>,service_id=<service ID>
2873 * to the add_target sysfs attribute.
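/*
 * Editor's sketch (not part of the driver): writing such a string to the
 * add_target attribute from userspace. The class/device path follows the
 * srp_class name and the "srp-%s-%d" device naming used elsewhere in this
 * file; the device name, port number and all parameter values below are
 * placeholders.
 */
#if 0
#include <stdio.h>

static int srp_add_target_example(void)
{
	const char *path =
		"/sys/class/infiniband_srp/srp-mlx4_0-1/add_target";
	const char *spec = "id_ext=200400a0b81146a1,"
			   "ioc_guid=0002c90200402bd4,"
			   "dgid=fe800000000000000002c90200402bd5,"
			   "pkey=ffff,service_id=0002c90200402bd4";
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(spec, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}
#endif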
2877 SRP_OPT_ID_EXT = 1 << 0,
2878 SRP_OPT_IOC_GUID = 1 << 1,
2879 SRP_OPT_DGID = 1 << 2,
2880 SRP_OPT_PKEY = 1 << 3,
2881 SRP_OPT_SERVICE_ID = 1 << 4,
2882 SRP_OPT_MAX_SECT = 1 << 5,
2883 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2884 SRP_OPT_IO_CLASS = 1 << 7,
2885 SRP_OPT_INITIATOR_EXT = 1 << 8,
2886 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2887 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2888 SRP_OPT_SG_TABLESIZE = 1 << 11,
2889 SRP_OPT_COMP_VECTOR = 1 << 12,
2890 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2891 SRP_OPT_QUEUE_SIZE = 1 << 14,
2892 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2896 SRP_OPT_SERVICE_ID),
2899 static const match_table_t srp_opt_tokens = {
2900 { SRP_OPT_ID_EXT, "id_ext=%s" },
2901 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2902 { SRP_OPT_DGID, "dgid=%s" },
2903 { SRP_OPT_PKEY, "pkey=%x" },
2904 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2905 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2906 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2907 { SRP_OPT_IO_CLASS, "io_class=%x" },
2908 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2909 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2910 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2911 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2912 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2913 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2914 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2915 { SRP_OPT_ERR, NULL }
2918 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2920 char *options, *sep_opt;
2923 substring_t args[MAX_OPT_ARGS];
2929 options = kstrdup(buf, GFP_KERNEL);
2934 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2938 token = match_token(p, srp_opt_tokens, args);
2942 case SRP_OPT_ID_EXT:
2943 p = match_strdup(args);
2948 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2952 case SRP_OPT_IOC_GUID:
2953 p = match_strdup(args);
2958 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2963 p = match_strdup(args);
2968 if (strlen(p) != 32) {
2969 pr_warn("bad dest GID parameter '%s'\n", p);
2974 for (i = 0; i < 16; ++i) {
2975 strlcpy(dgid, p + i * 2, sizeof(dgid));
2976 if (sscanf(dgid, "%hhx",
2977 &target->orig_dgid.raw[i]) < 1) {
2987 if (match_hex(args, &token)) {
2988 pr_warn("bad P_Key parameter '%s'\n", p);
2991 target->pkey = cpu_to_be16(token);
2994 case SRP_OPT_SERVICE_ID:
2995 p = match_strdup(args);
3000 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3004 case SRP_OPT_MAX_SECT:
3005 if (match_int(args, &token)) {
3006 pr_warn("bad max sect parameter '%s'\n", p);
3009 target->scsi_host->max_sectors = token;
3012 case SRP_OPT_QUEUE_SIZE:
3013 if (match_int(args, &token) || token < 1) {
3014 pr_warn("bad queue_size parameter '%s'\n", p);
3017 target->scsi_host->can_queue = token;
3018 target->queue_size = token + SRP_RSP_SQ_SIZE +
3019 SRP_TSK_MGMT_SQ_SIZE;
3020 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3021 target->scsi_host->cmd_per_lun = token;
3024 case SRP_OPT_MAX_CMD_PER_LUN:
3025 if (match_int(args, &token) || token < 1) {
3026 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3030 target->scsi_host->cmd_per_lun = token;
3033 case SRP_OPT_IO_CLASS:
3034 if (match_hex(args, &token)) {
3035 pr_warn("bad IO class parameter '%s'\n", p);
3038 if (token != SRP_REV10_IB_IO_CLASS &&
3039 token != SRP_REV16A_IB_IO_CLASS) {
3040 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3041 token, SRP_REV10_IB_IO_CLASS,
3042 SRP_REV16A_IB_IO_CLASS);
3045 target->io_class = token;
3048 case SRP_OPT_INITIATOR_EXT:
3049 p = match_strdup(args);
3054 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3058 case SRP_OPT_CMD_SG_ENTRIES:
3059 if (match_int(args, &token) || token < 1 || token > 255) {
3060 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3064 target->cmd_sg_cnt = token;
3067 case SRP_OPT_ALLOW_EXT_SG:
3068 if (match_int(args, &token)) {
3069 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3072 target->allow_ext_sg = !!token;
3075 case SRP_OPT_SG_TABLESIZE:
3076 if (match_int(args, &token) || token < 1 ||
3077 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3078 pr_warn("bad max sg_tablesize parameter '%s'\n",
3082 target->sg_tablesize = token;
3085 case SRP_OPT_COMP_VECTOR:
3086 if (match_int(args, &token) || token < 0) {
3087 pr_warn("bad comp_vector parameter '%s'\n", p);
3090 target->comp_vector = token;
3093 case SRP_OPT_TL_RETRY_COUNT:
3094 if (match_int(args, &token) || token < 2 || token > 7) {
3095 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3099 target->tl_retry_count = token;
3103 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3109 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3112 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3113 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3114 !(srp_opt_tokens[i].token & opt_mask))
3115 pr_warn("target creation request is missing parameter '%s'\n",
3116 srp_opt_tokens[i].pattern);
3118 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3119 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3120 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3121 target->scsi_host->cmd_per_lun,
3122 target->scsi_host->can_queue);
3129 static ssize_t srp_create_target(struct device *dev,
3130 struct device_attribute *attr,
3131 const char *buf, size_t count)
3133 struct srp_host *host =
3134 container_of(dev, struct srp_host, dev);
3135 struct Scsi_Host *target_host;
3136 struct srp_target_port *target;
3137 struct srp_rdma_ch *ch;
3138 struct srp_device *srp_dev = host->srp_dev;
3139 struct ib_device *ibdev = srp_dev->dev;
3140 int ret, node_idx, node, cpu, i;
3141 bool multich = false;
3143 target_host = scsi_host_alloc(&srp_template,
3144 sizeof (struct srp_target_port));
3148 target_host->transportt = ib_srp_transport_template;
3149 target_host->max_channel = 0;
3150 target_host->max_id = 1;
3151 target_host->max_lun = SRP_MAX_LUN;
3152 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3154 target = host_to_target(target_host);
3156 target->io_class = SRP_REV16A_IB_IO_CLASS;
3157 target->scsi_host = target_host;
3158 target->srp_host = host;
3159 target->lkey = host->srp_dev->mr->lkey;
3160 target->rkey = host->srp_dev->mr->rkey;
3161 target->cmd_sg_cnt = cmd_sg_entries;
3162 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3163 target->allow_ext_sg = allow_ext_sg;
3164 target->tl_retry_count = 7;
3165 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3168 * Avoid that the SCSI host can be removed by srp_remove_target()
3169 * before this function returns.
3171 scsi_host_get(target->scsi_host);
3173 mutex_lock(&host->add_target_mutex);
3175 ret = srp_parse_options(buf, target);
3179 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3183 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3185 if (!srp_conn_unique(target->srp_host, target)) {
3186 shost_printk(KERN_INFO, target->scsi_host,
3187 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3188 be64_to_cpu(target->id_ext),
3189 be64_to_cpu(target->ioc_guid),
3190 be64_to_cpu(target->initiator_ext));
3195 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3196 target->cmd_sg_cnt < target->sg_tablesize) {
3197 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3198 target->sg_tablesize = target->cmd_sg_cnt;
3201 target_host->sg_tablesize = target->sg_tablesize;
3202 target->indirect_size = target->sg_tablesize *
3203 sizeof (struct srp_direct_buf);
3204 target->max_iu_len = sizeof (struct srp_cmd) +
3205 sizeof (struct srp_indirect_buf) +
3206 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
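/*
 * Editor's note (assumption): with the default cmd_sg_cnt of 12 and the
 * usual 16-byte srp_direct_buf descriptors this works out to
 * sizeof(struct srp_cmd) + sizeof(struct srp_indirect_buf) + 12 * 16
 * bytes, i.e. a few hundred bytes per request IU.
 */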
3208 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3209 INIT_WORK(&target->remove_work, srp_remove_work);
3210 spin_lock_init(&target->lock);
3211 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3216 target->ch_count = max_t(unsigned, num_online_nodes(),
3218 min(4 * num_online_nodes(),
3219 ibdev->num_comp_vectors),
3220 num_online_cpus()));
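/*
 * Editor's note: the per-node loop below spreads channels and completion
 * vectors across NUMA nodes. As a worked example, with two online nodes,
 * eight completion vectors and ch_count = 4, node 0 owns channels [0, 2)
 * and node 1 owns channels [2, 4), and each node draws its comp_vector
 * values from its own half of the vector space, rotated by
 * target->comp_vector.
 */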
3221 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3227 for_each_online_node(node) {
3228 const int ch_start = (node_idx * target->ch_count /
3229 num_online_nodes());
3230 const int ch_end = ((node_idx + 1) * target->ch_count /
3231 num_online_nodes());
3232 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3233 num_online_nodes() + target->comp_vector)
3234 % ibdev->num_comp_vectors;
3235 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3236 num_online_nodes() + target->comp_vector)
3237 % ibdev->num_comp_vectors;
3240 for_each_online_cpu(cpu) {
3241 if (cpu_to_node(cpu) != node)
3243 if (ch_start + cpu_idx >= ch_end)
3245 ch = &target->ch[ch_start + cpu_idx];
3246 ch->target = target;
3247 ch->comp_vector = cv_start == cv_end ? cv_start :
3248 cv_start + cpu_idx % (cv_end - cv_start);
3249 spin_lock_init(&ch->lock);
3250 INIT_LIST_HEAD(&ch->free_tx);
3251 ret = srp_new_cm_id(ch);
3253 goto err_disconnect;
3255 ret = srp_create_ch_ib(ch);
3257 goto err_disconnect;
3259 ret = srp_alloc_req_data(ch);
3261 goto err_disconnect;
3263 ret = srp_connect_ch(ch, multich);
3265 shost_printk(KERN_ERR, target->scsi_host,
3266 PFX "Connection %d/%d failed\n",
3269 if (node_idx == 0 && cpu_idx == 0) {
3270 goto err_disconnect;
3272 srp_free_ch_ib(target, ch);
3273 srp_free_req_data(target, ch);
3274 target->ch_count = ch - target->ch;
3285 target->scsi_host->nr_hw_queues = target->ch_count;
3287 ret = srp_add_target(host, target);
3289 goto err_disconnect;
3291 if (target->state != SRP_TARGET_REMOVED) {
3292 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3293 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3294 be64_to_cpu(target->id_ext),
3295 be64_to_cpu(target->ioc_guid),
3296 be16_to_cpu(target->pkey),
3297 be64_to_cpu(target->service_id),
3298 target->sgid.raw, target->orig_dgid.raw);
3304 mutex_unlock(&host->add_target_mutex);
3306 scsi_host_put(target->scsi_host);
3311 srp_disconnect_target(target);
3313 for (i = 0; i < target->ch_count; i++) {
3314 ch = &target->ch[i];
3315 srp_free_ch_ib(target, ch);
3316 srp_free_req_data(target, ch);
3322 scsi_host_put(target_host);
3326 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3328 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3331 struct srp_host *host = container_of(dev, struct srp_host, dev);
3333 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3336 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3338 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3341 struct srp_host *host = container_of(dev, struct srp_host, dev);
3343 return sprintf(buf, "%d\n", host->port);
3346 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3348 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3350 struct srp_host *host;
3352 host = kzalloc(sizeof *host, GFP_KERNEL);
3356 INIT_LIST_HEAD(&host->target_list);
3357 spin_lock_init(&host->target_lock);
3358 init_completion(&host->released);
3359 mutex_init(&host->add_target_mutex);
3360 host->srp_dev = device;
3363 host->dev.class = &srp_class;
3364 host->dev.parent = device->dev->dma_device;
3365 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3367 if (device_register(&host->dev))
3369 if (device_create_file(&host->dev, &dev_attr_add_target))
3371 if (device_create_file(&host->dev, &dev_attr_ibdev))
3373 if (device_create_file(&host->dev, &dev_attr_port))
3379 device_unregister(&host->dev);
3387 static void srp_add_one(struct ib_device *device)
3389 struct srp_device *srp_dev;
3390 struct ib_device_attr *dev_attr;
3391 struct srp_host *host;
3392 int mr_page_shift, s, e, p;
3393 u64 max_pages_per_mr;
3395 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3399 if (ib_query_device(device, dev_attr)) {
3400 pr_warn("Query device failed for %s\n", device->name);
3404 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3408 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3409 device->map_phys_fmr && device->unmap_fmr);
3410 srp_dev->has_fr = (dev_attr->device_cap_flags &
3411 IB_DEVICE_MEM_MGT_EXTENSIONS);
3412 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3413 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3415 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3416 (!srp_dev->has_fmr || prefer_fr));
3419 * Use the smallest page size supported by the HCA, down to a
3420 * minimum of 4096 bytes. We're unlikely to build large sglists
3421 * out of smaller entries.
3423 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3424 srp_dev->mr_page_size = 1 << mr_page_shift;
3425 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3426 max_pages_per_mr = dev_attr->max_mr_size;
3427 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3428 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3430 if (srp_dev->use_fast_reg) {
3431 srp_dev->max_pages_per_mr =
3432 min_t(u32, srp_dev->max_pages_per_mr,
3433 dev_attr->max_fast_reg_page_list_len);
3435 srp_dev->mr_max_size = srp_dev->mr_page_size *
3436 srp_dev->max_pages_per_mr;
3437 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3438 device->name, mr_page_shift, dev_attr->max_mr_size,
3439 dev_attr->max_fast_reg_page_list_len,
3440 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
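/*
 * Editor's sketch (not part of the driver): the page-size arithmetic above,
 * redone standalone with hypothetical page_size_cap and max_mr_size values
 * so the resulting numbers are easy to check by hand. Note that the driver
 * additionally clamps max_pages_per_mr to SRP_MAX_PAGES_PER_MR and, for
 * fast registration, to max_fast_reg_page_list_len.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_size_cap = 0x000000000ffff000ULL; /* example cap */
	uint64_t max_mr_size = 1ULL << 32;		 /* example: 4 GiB */
	int mr_page_shift;
	uint64_t mr_page_size, mr_page_mask, max_pages_per_mr;

	/* Smallest supported page size, but never below 4096 bytes. */
	mr_page_shift = __builtin_ffsll(page_size_cap) - 1;
	if (mr_page_shift < 12)
		mr_page_shift = 12;
	mr_page_size = 1ULL << mr_page_shift;
	mr_page_mask = ~(mr_page_size - 1);
	max_pages_per_mr = max_mr_size / mr_page_size;

	printf("shift=%d size=%llu mask=%#llx pages=%llu\n",
	       mr_page_shift, (unsigned long long)mr_page_size,
	       (unsigned long long)mr_page_mask,
	       (unsigned long long)max_pages_per_mr);
	return 0;
}
#endif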
3442 INIT_LIST_HEAD(&srp_dev->dev_list);
3444 srp_dev->dev = device;
3445 srp_dev->pd = ib_alloc_pd(device);
3446 if (IS_ERR(srp_dev->pd))
3449 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3450 IB_ACCESS_LOCAL_WRITE |
3451 IB_ACCESS_REMOTE_READ |
3452 IB_ACCESS_REMOTE_WRITE);
3453 if (IS_ERR(srp_dev->mr))
3456 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3461 e = device->phys_port_cnt;
3464 for (p = s; p <= e; ++p) {
3465 host = srp_add_port(srp_dev, p);
3467 list_add_tail(&host->list, &srp_dev->dev_list);
3470 ib_set_client_data(device, &srp_client, srp_dev);
3475 ib_dealloc_pd(srp_dev->pd);
3484 static void srp_remove_one(struct ib_device *device)
3486 struct srp_device *srp_dev;
3487 struct srp_host *host, *tmp_host;
3488 struct srp_target_port *target;
3490 srp_dev = ib_get_client_data(device, &srp_client);
3494 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3495 device_unregister(&host->dev);
3497 * Wait for the sysfs entry to go away, so that no new
3498 * target ports can be created.
3500 wait_for_completion(&host->released);
3503 * Remove all target ports.
3505 spin_lock(&host->target_lock);
3506 list_for_each_entry(target, &host->target_list, list)
3507 srp_queue_remove_work(target);
3508 spin_unlock(&host->target_lock);
3511 * Wait for tl_err and target port removal tasks.
3513 flush_workqueue(system_long_wq);
3514 flush_workqueue(srp_remove_wq);
3519 ib_dereg_mr(srp_dev->mr);
3520 ib_dealloc_pd(srp_dev->pd);
3525 static struct srp_function_template ib_srp_transport_functions = {
3526 .has_rport_state = true,
3527 .reset_timer_if_blocked = true,
3528 .reconnect_delay = &srp_reconnect_delay,
3529 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3530 .dev_loss_tmo = &srp_dev_loss_tmo,
3531 .reconnect = srp_rport_reconnect,
3532 .rport_delete = srp_rport_delete,
3533 .terminate_rport_io = srp_terminate_io,
3536 static int __init srp_init_module(void)
3540 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3542 if (srp_sg_tablesize) {
3543 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3544 if (!cmd_sg_entries)
3545 cmd_sg_entries = srp_sg_tablesize;
3548 if (!cmd_sg_entries)
3549 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3551 if (cmd_sg_entries > 255) {
3552 pr_warn("Clamping cmd_sg_entries to 255\n");
3553 cmd_sg_entries = 255;
3556 if (!indirect_sg_entries)
3557 indirect_sg_entries = cmd_sg_entries;
3558 else if (indirect_sg_entries < cmd_sg_entries) {
3559 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3561 indirect_sg_entries = cmd_sg_entries;
3564 srp_remove_wq = create_workqueue("srp_remove");
3565 if (!srp_remove_wq) {
3571 ib_srp_transport_template =
3572 srp_attach_transport(&ib_srp_transport_functions);
3573 if (!ib_srp_transport_template)
3576 ret = class_register(&srp_class);
3578 pr_err("couldn't register class infiniband_srp\n");
3582 ib_sa_register_client(&srp_sa_client);
3584 ret = ib_register_client(&srp_client);
3586 pr_err("couldn't register IB client\n");
3594 ib_sa_unregister_client(&srp_sa_client);
3595 class_unregister(&srp_class);
3598 srp_release_transport(ib_srp_transport_template);
3601 destroy_workqueue(srp_remove_wq);
3605 static void __exit srp_cleanup_module(void)
3607 ib_unregister_client(&srp_client);
3608 ib_sa_unregister_client(&srp_sa_client);
3609 class_unregister(&srp_class);
3610 srp_release_transport(ib_srp_transport_template);
3611 destroy_workqueue(srp_remove_wq);
3614 module_init(srp_init_module);
3615 module_exit(srp_cleanup_module);