/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
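/*
 * Worked example: the IOC GUID is stored big-endian, so its first three
 * bytes are the vendor OUI.  A (made-up) GUID of 0x0005ad0000001234 starts
 * with the bytes 00:05:ad -- the Topspin OUI -- and matches the test above.
 */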
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;
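	/* The starting PSN is a 24-bit field, hence the mask above. */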
	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = 7;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}
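	/*
	 * Byte-layout sketch (made-up identifiers): with id_ext 0x1 and
	 * ioc_guid 0x2, a rev. 16a target port ID is the 16 bytes
	 * 00..01 followed by 00..02 (ID extension first), while a rev. 10
	 * target expects the same two 8-byte halves in the opposite order.
	 */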
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}
static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}
static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_target(target);
}
static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = DID_RESET << 16;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	int i, ret;

	if (target->state != SRP_TARGET_LIVE)
		return -EAGAIN;

	scsi_target_block(&shost->shost_gendev);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto unblock;

	ret = srp_create_target_ib(target);
	if (ret)
		goto unblock;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	ret = srp_connect_target(target);

unblock:
	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
			    SDEV_TRANSPORT_OFFLINE);

	if (ret)
		goto err;

	shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 */
	srp_queue_remove_work(target);

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}
static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
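/*
 * Worked example for the alignment rule above (hypothetical addresses,
 * 4 KiB FMR pages): an S/G entry starting at bus address 0x10001200 has
 * low bits set under ~fmr_page_mask, so it is emitted as a plain direct
 * descriptor; an aligned entry covering 0x10002000..0x10005fff instead
 * contributes four page addresses to state->pages[].
 */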
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	state.pages = req->map_page;
	state.next_fmr = req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}
/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
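/*
 * Example of the resulting accounting (assuming the queue sizes defined in
 * ib_srp.h, e.g. SRP_TSK_MGMT_SQ_SIZE of 1): a SRP_IU_CMD allocation below
 * is refused once req_lim would drop to the reserved SRP_TSK_MGMT_SQ_SIZE
 * credits, so a task-management IU can still be sent even when the send
 * queue is saturated with commands.
 */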
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}
static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}
static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
static void srp_handle_qp_err(enum ib_wc_status wc_status,
			      enum ib_wc_opcode wc_opcode,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     wc_opcode & IB_WC_RECV ? "receive" : "send",
			     wc_status);
	}
	target->qp_in_error = true;
}
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}
static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
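/*
 * Worked example (hypothetical QP attributes): with qp_attr->timeout = 14
 * and qp_attr->retry_cnt = 7, T_tr = 4096 * 2^14 ns ~= 67 ms, the worst
 * case completion time is 7 * 4 * 67 ms ~= 1.9 s, and the resulting
 * request queue timeout is roughly 2.9 seconds' worth of jiffies.
 */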
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring[0]) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < SRP_RQ_SIZE; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
		return FAILED;
	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			  SRP_TSK_ABORT_TASK);
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return SUCCESS;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;
	target->connected = false;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name = "infiniband_srp",
	.dev_release = srp_release_dev
};
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
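/*
 * Usage sketch with made-up identifiers (the "srp-<device>-<port>" name
 * depends on the local HCA and port, see srp_add_port() below):
 *
 *     echo "id_ext=200100a0b8000000,ioc_guid=00a0b80200401234,dgid=fe800000000000000002c90200401234,pkey=ffff,service_id=200100a0b8000000" > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */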
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_ALL = (SRP_OPT_ID_EXT	|
		       SRP_OPT_IOC_GUID	|
		       SRP_OPT_DGID	|
		       SRP_OPT_PKEY	|
		       SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_ERR,			NULL			}
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id = 1;
	target_host->max_lun = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;
	target->lkey = host->srp_dev->mr->lkey;
	target->rkey = host->srp_dev->mr->rkey;
	target->cmd_sg_cnt = cmd_sg_entries;
	target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg = allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
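	/*
	 * Worked example (assuming the wire-format struct sizes from
	 * include/scsi/srp.h: a 48-byte srp_cmd, 16-byte srp_direct_buf and
	 * 20-byte srp_indirect_buf header): with the default cmd_sg_cnt of
	 * 12 this gives max_iu_len = 48 + 20 + 12 * 16 = 260 bytes per IU.
	 */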

	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size = 1 << fmr_page_shift;
	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
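	/*
	 * Worked example (hypothetical capability mask): an HCA reporting
	 * page_size_cap = 0xfffff000 supports 4 KiB pages and larger, so
	 * ffs() - 1 = 12 and fmr_page_shift stays at 12; fmr_page_size is
	 * then 4096 and fmr_max_size is SRP_FMR_SIZE * 4096 bytes.
	 */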

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
	     max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
	     max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift = fmr_page_shift;
		fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.rport_delete = srp_rport_delete,
};
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);