/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The
 * hypervisor puts the message in the next 16 byte space in round-robin
 * fashion, turns on the high order bit of the message (the valid bit),
 * and generates an interrupt to the receiver (if interrupts are turned
 * on.)  The receiver just turns off the valid bit when they have copied
 * out the message.
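 *
 * As a rough sketch only (the authoritative layout lives in viosrp.h,
 * not here), the fields of the 16 byte CRQ entry that this file relies
 * on are:
 *
 *     valid       -- 0x80 payload, 0xC0 initialization, 0xFF transport event
 *     format      -- VIOSRP_SRP_FORMAT or VIOSRP_MAD_FORMAT for payloads
 *     timeout     -- timeout for the request, in seconds
 *     IU_length   -- length of the mapped SRP IU
 *     IU_data_ptr -- DMA address of the IU, echoed back as a correlation tag
 *     status      -- VIOSRP_OK or VIOSRP_OK2 when the request succeeded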
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/kthread.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_srp.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 300;
static int login_timeout = 60;
static int info_timeout = 30;
static int abort_timeout = 60;
static int reset_timeout = 60;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;

static struct scsi_transport_template *ibmvscsi_transport_template;

#define IBMVSCSI_VERSION "1.5.9"

static struct ibmvscsi_ops *ibmvscsi_ops;

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO);
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
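
/* For illustration only (parameter names as declared above): the request
 * pool can be sized when the module is loaded, e.g.
 *
 *     modprobe ibmvscsi max_requests=200 fast_fail=0
 *
 * max_requests is read-only once loaded (S_IRUGO); the timeouts and
 * fast_fail can also be changed later through
 * /sys/module/ibmvscsi/parameters/.
 */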

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:	event_pool to be initialized
 * @size:	Number of events in pool
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];
		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.IU_length = sizeof(*evt->xfer_iu);
		evt->crq.IU_data_ptr = pool->iu_token +
			sizeof(*evt->xfer_iu) * i;
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}
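
/* Design note: each srp_event_struct above is permanently paired with one
 * slot of iu_storage, and evt->crq.IU_data_ptr is precomputed from
 * iu_token, so sending an event never needs a DMA mapping at I/O time.
 */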

/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool:	event_pool to be released
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;
	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
					  SG_ALL * sizeof(struct srp_direct_buf),
					  pool->events[i].ext_list,
					  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		dev_warn(hostdata->dev, "releasing event pool with %d "
			 "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
			"(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
			"which is not in use!\n", evt);
		return;
	}
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool:	event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}

/**
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct:	The event
 * @done:	Routine to call when the event is responded to
 * @format:	SRP or MAD format
 * @timeout:	timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = timeout;
	evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}
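
	/* The SRP buffer-format byte packs two 4-bit descriptor formats:
	 * data-out in the high nibble, data-in in the low nibble
	 * (unmap_cmd_data() decodes buf_fmt the same way).
	 */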
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd:	srp_cmd whose additional_data member will be unmapped
 * @dev:	device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;

	if (evt_struct->cmnd)
		scsi_dma_unmap(evt_struct->cmnd);
}

static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
		       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;
	u64 total_length = 0;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		struct srp_direct_buf *descr = md + i;
		descr->va = sg_dma_address(sg);
		descr->len = sg_dma_len(sg);
		descr->key = 0;
		total_length += sg_dma_len(sg);
	}
	return total_length;
}

/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd:	Scsi_Cmnd with the scatterlist
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{
	int sg_mapped;
	u64 total_length = 0;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = scsi_dma_map(cmd);
	if (!sg_mapped)
		return 1;
	else if (sg_mapped < 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		map_sg_list(cmd, sg_mapped, data);
		return 1;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(cmd, sg_mapped,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				sdev_printk(KERN_ERR, cmd->device,
					    "Can't allocate memory "
					    "for indirect table\n");
			scsi_dma_unmap(cmd);
			return 0;
		}
	}

	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);

	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
	return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	dma device for which to map dma memory
 *
 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		sdev_printk(KERN_ERR, cmd->device,
			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		sdev_printk(KERN_ERR, cmd->device,
			    "Unknown data direction 0x%02x; can't map!\n",
			    cmd->sc_data_direction);
		return 0;
	}

	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}

/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests
 * @hostdata:	the adapter
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *evt;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	while (!list_empty(&hostdata->sent)) {
		evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
		list_del(&evt->list);
		del_timer(&evt->timer);

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		if (evt->cmnd) {
			evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&evt->iu.srp.cmd, evt,
				       evt->hostdata->dev);
			if (evt->cmnd_done)
				evt->cmnd_done(evt->cmnd);
		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
			evt->done(evt);
		free_event_struct(&evt->hostdata->pool, evt);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_reset_host - Reset the connection to the server
 * @hostdata:	struct ibmvscsi_host_data to reset
 */
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
	scsi_block_requests(hostdata->host);
	atomic_set(&hostdata->request_limit, 0);

	purge_requests(hostdata, DID_ERROR);
	hostdata->reset_crq = 1;
	wake_up(&hostdata->work_wait_q);
}

/**
 * ibmvscsi_timeout - Internal command timeout handler
 * @evt_struct:	struct srp_event_struct that timed out
 *
 * Called when an internally generated command times out
 */
static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
		evt_struct->iu.srp.cmd.opcode);

	ibmvscsi_reset_host(hostdata);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct:	evt_struct to be sent
 * @hostdata:	ibmvscsi_host_data of host
 * @timeout:	timeout in seconds - 0 means do not time command
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata,
				   unsigned long timeout)
{
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int request_status = 0;
	int rc;
	int srp_req = 0;

	/* If we have exhausted our request limit, just fail this request,
	 * unless it is for a reset or abort.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
		srp_req = 1;
		request_status =
			atomic_dec_if_positive(&hostdata->request_limit);
		/* If request limit was -1 when we started, it is now even
		 * less than that
		 */
		if (request_status < -1)
			goto send_error;
		/* Otherwise, we may have run out of requests. */
		/* If request limit was 0 when we started the adapter is in the
		 * process of performing a login with the server adapter, or
		 * we may have run out of requests.
		 */
		else if (request_status == -1 &&
			 evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
			goto send_busy;
		/* Abort and reset calls should make it through.
		 * Nothing except abort and reset should use the last two
		 * slots unless we had two or less to begin with.
		 */
		else if (request_status < 2 &&
			 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
			/* In the case that we have less than two requests
			 * available, check the server limit as a combination
			 * of the request limit and the number of requests
			 * in-flight (the size of the send list).  If the
			 * server limit is greater than 2, return busy so
			 * that the last two are reserved for reset and abort.
			 */
			int server_limit = request_status;
			struct srp_event_struct *tmp_evt;

			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
				server_limit++;
			}

			if (server_limit > 2)
				goto send_busy;
		}
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	init_timer(&evt_struct->timer);
	if (timeout) {
		evt_struct->timer.data = (unsigned long) evt_struct;
		evt_struct->timer.expires = jiffies + (timeout * HZ);
		evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
		add_timer(&evt_struct->timer);
	}

	if ((rc =
	     ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);
		del_timer(&evt_struct->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport.  This
		 * will be handled in ibmvscsi_handle_crq()
		 */
		if (rc == H_CLOSED) {
			dev_warn(hostdata->dev, "send warning. "
				 "Receive queue closed, will retry.\n");
			goto send_busy;
		}
		dev_err(hostdata->dev, "send error %d\n", rc);
		if (srp_req)
			atomic_inc(&hostdata->request_limit);
		goto send_error;
	}

	return 0;

 send_busy:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	if (srp_req && request_status != -1)
		atomic_inc(&hostdata->request_limit);
	return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if (evt_struct->cmnd != NULL) {
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);
	return 0;
}

/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct:	srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			dev_warn(evt_struct->hostdata->dev,
				 "bad SRP RSP type %d\n", rsp->opcode);
	}

	if (cmnd) {
		cmnd->result |= rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:	struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
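	/* Build a 16-bit flat-space SRP LUN: addressing method 0x2 in the
	 * top two bits, then 6 bits of target id, 3 bits of channel (bus),
	 * and 5 bits of lun -- the same split described at the top of
	 * this file.
	 */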
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
				     void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	cmnd->result = (DID_OK << 16);
	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			sdev_printk(KERN_ERR, cmnd->device,
				    "couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->request->timeout/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
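		/* The descriptor list was built inline in the IU by
		 * map_sg_data(), so point the table descriptor at its
		 * offset inside the already-mapped IU buffer instead of
		 * at a separately mapped table.
		 */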
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}

static DEF_SCSI_QCMD(ibmvscsi_queuecommand)

/* ------------------------------------------------------------
 * Routines for driver initialization
 */

/**
 * map_persist_bufs: - Pre-map persistent data for adapter logins
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
 * Return 1 on error, 0 on success.
 */
static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
	hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
					     sizeof(hostdata->caps), DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
		dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
		return 1;
	}

	hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
						     &hostdata->madapter_info,
						     sizeof(hostdata->madapter_info),
						     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
		dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
		dma_unmap_single(hostdata->dev, hostdata->caps_addr,
				 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
		return 1;
	}

	return 0;
}

/**
 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Unmap the capabilities and adapter info DMA buffers
 */
static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
	dma_unmap_single(hostdata->dev, hostdata->caps_addr,
			 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);

	dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
			 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
}

/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
			 evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed. */
		atomic_set(&hostdata->request_limit, -1);
		return;
	default:
		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
			evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed. */
		atomic_set(&hostdata->request_limit, -1);
		return;
	}

	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
	hostdata->client_migrated = 0;

	/* Now we know what the real request-limit is.
	 * This value is set rather than added to request_limit because
	 * request_limit could have been set to -1 by this client.
	 */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
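
	/* From here on, request_limit is the SRP flow-control credit count:
	 * ibmvscsi_send_srp_event() decrements it per command, and each
	 * SRP response adds back the server's req_lim_delta.
	 */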

	/* If we had any pending I/Os, kick them */
	scsi_unblock_requests(hostdata->host);
}

/**
 * send_srp_login: - Sends the srp login
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);

	BUG_ON(!evt_struct);
	init_event_struct(evt_struct, login_rsp,
			  VIOSRP_SRP_FORMAT, login_timeout);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0, sizeof(*login));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 0, since this is negotiated in
	 * the login request we are just sending and login requests always
	 * get sent by the driver regardless of request_limit.
	 */
	atomic_set(&hostdata->request_limit, 0);

	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	dev_info(hostdata->dev, "sent SRP login\n");
	return rc;
}

/**
 * capabilities_rsp: - Handle response to MAD adapter capabilities request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending the capabilities MAD.
 */
static void capabilities_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	if (evt_struct->xfer_iu->mad.capabilities.common.status) {
		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
			evt_struct->xfer_iu->mad.capabilities.common.status);
	} else {
		if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
			dev_info(hostdata->dev, "Partition migration not supported\n");

		if (client_reserve) {
			if (hostdata->caps.reserve.common.server_support ==
			    SERVER_SUPPORTS_CAP)
				dev_info(hostdata->dev, "Client reserve enabled\n");
			else
				dev_info(hostdata->dev, "Client reserve not supported\n");
		}
	}

	send_srp_login(hostdata);
}

/**
 * send_mad_capabilities: - Sends the mad capabilities request
 *	and stores the result so it can be retrieved with sysfs.
 * @hostdata:	ibmvscsi_host_data of host
 */
static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_capabilities *req;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	struct device_node *of_node = hostdata->dev->of_node;
	const char *location;

	evt_struct = get_event_struct(&hostdata->pool);
	BUG_ON(!evt_struct);

	init_event_struct(evt_struct, capabilities_rsp,
			  VIOSRP_MAD_FORMAT, info_timeout);

	req = &evt_struct->iu.mad.capabilities;
	memset(req, 0, sizeof(*req));

	hostdata->caps.flags = CAP_LIST_SUPPORTED;
	if (hostdata->client_migrated)
		hostdata->caps.flags |= CLIENT_MIGRATED;

	strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
		sizeof(hostdata->caps.name));
	hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(hostdata->dev);
	strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
	hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';

	req->common.type = VIOSRP_CAPABILITIES_TYPE;
	req->buffer = hostdata->caps_addr;

	hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
	hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
	hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
	hostdata->caps.migration.ecl = 1;

	if (client_reserve) {
		hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
		hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
		hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
		hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
		req->common.length = sizeof(hostdata->caps);
	} else
		req->common.length = sizeof(hostdata->caps) -
			sizeof(hostdata->caps.reserve);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
		dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * fast_fail_rsp: - Handle response to MAD enable fast fail
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending enable fast fail. Gets called
 * by ibmvscsi_handle_crq()
 */
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;

	if (status == VIOSRP_MAD_NOT_SUPPORTED)
		dev_err(hostdata->dev, "fast_fail not supported in server\n");
	else if (status == VIOSRP_MAD_FAILED)
		dev_err(hostdata->dev, "fast_fail request failed\n");
	else if (status != VIOSRP_MAD_SUCCESS)
		dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);

	send_mad_capabilities(hostdata);
}

/**
 * enable_fast_fail: - Send the MAD that enables fast-fail behavior
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct viosrp_fast_fail *fast_fail_mad;
	struct srp_event_struct *evt_struct;

	if (!fast_fail) {
		send_mad_capabilities(hostdata);
		return 0;
	}

	evt_struct = get_event_struct(&hostdata->pool);
	BUG_ON(!evt_struct);

	init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);

	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
	fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
	fast_fail_mad->common.length = sizeof(*fast_fail_mad);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return rc;
}

/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		dev_err(hostdata->dev, "error %d getting adapter info\n",
			evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		dev_info(hostdata->dev, "host srp version: %s, "
			 "host partition %s (%d), OS %d, max io %u\n",
			 hostdata->madapter_info.srp_version,
			 hostdata->madapter_info.partition_name,
			 hostdata->madapter_info.partition_number,
			 hostdata->madapter_info.os_type,
			 hostdata->madapter_info.port_max_txu[0]);
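
		/* port_max_txu is in bytes; shifting right by 9 converts it
		 * to the 512-byte sectors that max_sectors expects.
		 */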
		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
				hostdata->madapter_info.srp_version);
			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
				MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}

		if (hostdata->madapter_info.os_type == 3) {
			enable_fast_fail(hostdata);
			return;
		}
	}

	send_srp_login(hostdata);
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *	and stores the result so it can be retrieved with
 *	sysfs.  We COULD consider causing a failure if the
 *	returned SRP version doesn't match ours.
 * @hostdata:	ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	unsigned long flags;

	evt_struct = get_event_struct(&hostdata->pool);
	BUG_ON(!evt_struct);

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  info_timeout);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = hostdata->adapter_info_addr;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * init_adapter: Start virtual adapter initialization sequence
 */
static void init_adapter(struct ibmvscsi_host_data *hostdata)
{
	send_mad_adapter_info(hostdata);
}

/**
 * sync_completion: Signal that a synchronous command has completed.
 * Note that after returning from this call, the evt_struct is freed;
 * the caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}

/**
 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
 * send this over to the server and wait synchronously for the response
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);
	unsigned long wait_switch = 0;

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		found_evt = NULL;
		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
			if (tmp_evt->cmnd == cmd) {
				found_evt = tmp_evt;
				break;
			}
		}

		if (!found_evt) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			return SUCCESS;
		}

		evt = get_event_struct(&hostdata->pool);
		if (evt == NULL) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			sdev_printk(KERN_ERR, cmd->device,
				    "failed to allocate abort event\n");
			return FAILED;
		}

		init_event_struct(evt,
				  sync_completion,
				  VIOSRP_SRP_FORMAT,
				  abort_timeout);

		tsk_mgmt = &evt->iu.srp.tsk_mgmt;

		/* Set up an abort SRP command */
		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
		tsk_mgmt->opcode = SRP_TSK_MGMT;
		tsk_mgmt->lun = ((u64) lun) << 48;
		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
		tsk_mgmt->task_tag = (u64) found_evt;
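
		/* The tag to abort is the event pointer itself: that is what
		 * ibmvscsi_send_srp_event() put in the outgoing IU's tag
		 * field, so it uniquely names the in-flight request.
		 */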
		evt->sync_srp = &srp_rsp;

		init_completion(&evt->comp);
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);

		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));

	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send abort() event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	sdev_printk(KERN_INFO, cmd->device,
		    "aborting command. lun 0x%llx, tag 0x%llx\n",
		    (((u64) lun) << 48), (u64) found_evt);

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "abort code %d for task tag 0x%llx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * the event is no longer in our list.  Make sure it didn't
	 * complete while we were aborting
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
			    tsk_mgmt->task_tag);
		return SUCCESS;
	}

	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
		    tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}

/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);
	unsigned long wait_switch = 0;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		evt = get_event_struct(&hostdata->pool);
		if (evt == NULL) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			sdev_printk(KERN_ERR, cmd->device,
				    "failed to allocate reset event\n");
			return FAILED;
		}

		init_event_struct(evt,
				  sync_completion,
				  VIOSRP_SRP_FORMAT,
				  reset_timeout);

		tsk_mgmt = &evt->iu.srp.tsk_mgmt;

		/* Set up a lun reset SRP command */
		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
		tsk_mgmt->opcode = SRP_TSK_MGMT;
		tsk_mgmt->lun = ((u64) lun) << 48;
		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

		evt->sync_srp = &srp_rsp;

		init_completion(&evt->comp);
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);

		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));

	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send reset event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
		    (((u64) lun) << 48));

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "reset code %d for task tag 0x%llx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}

/**
 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
 * @cmd:	struct scsi_cmnd having problems
 */
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long wait_switch = 0;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);

	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");

	ibmvscsi_reset_host(hostdata);

	for (wait_switch = jiffies + (init_timeout * HZ);
	     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

		msleep(10);
	}

	if (atomic_read(&hostdata->request_limit) <= 0)
		return FAILED;

	return SUCCESS;
}

/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @hostdata:	ibmvscsi_host_data of host
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			 struct ibmvscsi_host_data *hostdata)
{
	long rc;
	unsigned long flags;
	struct srp_event_struct *evt_struct =
	    (struct srp_event_struct *)crq->IU_data_ptr;
	switch (crq->valid) {
	case 0xC0:		/* initialization */
		switch (crq->format) {
		case 0x01:	/* Initialization message */
			dev_info(hostdata->dev, "partner initialized\n");
			/* Send back a response */
			if ((rc = ibmvscsi_ops->send_crq(hostdata,
							 0xC002000000000000LL, 0)) == 0) {
				/* Now login */
				init_adapter(hostdata);
			} else {
				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
			}
			break;
		case 0x02:	/* Initialization response */
			dev_info(hostdata->dev, "partner initialization complete\n");

			/* Now login */
			init_adapter(hostdata);
			break;
		default:
			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
		}
		return;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		atomic_set(&hostdata->request_limit, 0);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			dev_info(hostdata->dev, "Re-enabling adapter!\n");
			hostdata->client_migrated = 1;
			hostdata->reenable_crq = 1;
			purge_requests(hostdata, DID_REQUEUE);
			wake_up(&hostdata->work_wait_q);
		} else {
			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
				crq->format);
			ibmvscsi_reset_host(hostdata);
		}
		return;
	case 0x80:		/* real payload */
		break;
	default:
		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
			crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
			(void *)crq->IU_data_ptr);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
			(void *)crq->IU_data_ptr);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	del_timer(&evt_struct->timer);

	if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
		evt_struct->cmnd->result = DID_ERROR << 16;
	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data.  The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
				   unsigned char *buffer, int length)
{
	struct viosrp_host_config *host_config;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	dma_addr_t addr;
	int rc;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
		return -1;
	}

	init_event_struct(evt_struct,
			  sync_completion,
			  VIOSRP_MAD_FORMAT,
			  info_timeout);

	host_config = &evt_struct->iu.mad.host_config;

	/* The transport length field is only 16-bit */
	length = min(0xffff, length);

	/* Set up a HOST_CONFIG MAD */
	memset(host_config, 0x00, sizeof(*host_config));
	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
	host_config->common.length = length;
	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
						    length,
						    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(hostdata->dev,
				"dma_mapping error getting host config\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return -1;
	}

	init_completion(&evt_struct->comp);
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rc == 0)
		wait_for_completion(&evt_struct->comp);
	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

	return rc;
}

/**
 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
 * @sdev:	struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk.  Adjust the
 * queue_depth here also as is required by the documentation for
 * struct scsi_host_template.
 */
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (sdev->type == TYPE_DISK) {
		sdev->allow_restart = 1;
		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
	}
	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;
}

/**
 * ibmvscsi_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 */
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
				       int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;

	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return sdev->queue_depth;
}

/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_vhost_loc(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
		       hostdata->caps.loc);
	return len;
}

static struct device_attribute ibmvscsi_host_vhost_loc = {
	.attr = {
		 .name = "vhost_loc",
		 .mode = S_IRUGO,
		 },
	.show = show_host_vhost_loc,
};

static ssize_t show_host_vhost_name(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
		       hostdata->caps.name);
	return len;
}

static struct device_attribute ibmvscsi_host_vhost_name = {
	.attr = {
		 .name = "vhost_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_vhost_name,
};

static ssize_t show_host_srp_version(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.partition_number);
	return len;
}

static struct device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.mad_version);
	return len;
}

static struct device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
	return len;
}

static struct device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);

	/* returns null-terminated host config data */
	if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
		return strlen(buf);
	else
		return 0;
}

static struct device_attribute ibmvscsi_host_config = {
	.attr = {
		 .name = "config",
		 .mode = S_IRUGO,
		 },
	.show = show_host_config,
};

static struct device_attribute *ibmvscsi_attrs[] = {
	&ibmvscsi_host_vhost_loc,
	&ibmvscsi_host_vhost_name,
	&ibmvscsi_host_srp_version,
	&ibmvscsi_host_partition_name,
	&ibmvscsi_host_partition_number,
	&ibmvscsi_host_mad_version,
	&ibmvscsi_host_os_type,
	&ibmvscsi_host_config,
	NULL
};

/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
	.slave_configure = ibmvscsi_slave_configure,
	.change_queue_depth = ibmvscsi_change_queue_depth,
	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvscsi_attrs,
};
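
/* On systems with Cooperative Memory Overcommit (CMO), the hypervisor
 * grants each VIO device an IO entitlement; the callback below is how
 * this driver tells the VIO bus code roughly how much mapped IO memory
 * it wants, so the entitlement can be sized accordingly.
 */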
/**
 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
{
	/* iu_storage data allocated in initialize_event_pool */
	unsigned long desired_io = max_events * sizeof(union viosrp_iu);

	/* add io space for sg data */
	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
		       IBMVSCSI_CMDS_PER_LUN_DEFAULT);

	return desired_io;
}

static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	char *action = "reset";

	if (hostdata->reset_crq) {
		smp_rmb();
		hostdata->reset_crq = 0;

		rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
		if (!rc)
			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
		vio_enable_interrupts(to_vio_dev(hostdata->dev));
	} else if (hostdata->reenable_crq) {
		smp_rmb();
		action = "enable";
		rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
		hostdata->reenable_crq = 0;
		if (!rc)
			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
	} else
		return;

	if (rc) {
		atomic_set(&hostdata->request_limit, -1);
		dev_err(hostdata->dev, "error after %s\n", action);
	}

	scsi_unblock_requests(hostdata->host);
}

static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
{
	if (kthread_should_stop())
		return 1;
	else if (hostdata->reset_crq) {
		smp_rmb();
		return 1;
	} else if (hostdata->reenable_crq) {
		smp_rmb();
		return 1;
	}

	return 0;
}

static int ibmvscsi_work(void *data)
{
	struct ibmvscsi_host_data *hostdata = data;
	int rc;

	set_user_nice(current, -20);

	while (1) {
		rc = wait_event_interruptible(hostdata->work_wait_q,
					      ibmvscsi_work_to_do(hostdata));

		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		ibmvscsi_do_work(hostdata);
	}

	return 0;
}

/**
 * Called by bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;
	unsigned long wait_switch = 0;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);

	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		dev_err(&vdev->dev, "couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	host->transportt = ibmvscsi_transport_template;
	hostdata = shost_priv(host);
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	init_waitqueue_head(&hostdata->work_wait_q);
	hostdata->host = host;
	hostdata->dev = dev;
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;

	if (map_persist_bufs(hostdata)) {
		dev_err(&vdev->dev, "couldn't map persistent buffers\n");
		goto persist_bufs_failed;
	}

	hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
					    "ibmvscsi", host->host_no);

	if (IS_ERR(hostdata->work_thread)) {
		dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
			PTR_ERR(hostdata->work_thread));
		goto init_crq_failed;
	}

	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}
	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
		dev_err(&vdev->dev, "couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = 8;
	host->max_id = max_id;
	host->max_channel = max_channel;
	host->max_cmd_len = 16;

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* we don't have a proper target_port_id so let's use the fake one */
	memcpy(ids.port_id, hostdata->madapter_info.partition_name,
	       sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(host, &ids);
	if (IS_ERR(rport))
		goto add_srp_port_failed;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we don't
	 * scan right away; the partner will initiate the connection itself
	 * when it comes up, and ibmvscsi_handle_crq() will complete the
	 * login from there.
	 */
	if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	dev_set_drvdata(&vdev->dev, hostdata);
	return 0;

      add_srp_port_failed:
	scsi_remove_host(hostdata->host);
      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
      kill_kthread:
	kthread_stop(hostdata->work_thread);
      init_crq_failed:
	unmap_persist_bufs(hostdata);
      persist_bufs_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}

static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
	unmap_persist_bufs(hostdata);
	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
					max_events);

	kthread_stop(hostdata->work_thread);
	srp_remove_host(hostdata->host);
	scsi_remove_host(hostdata->host);
	scsi_host_put(hostdata->host);

	return 0;
}

/**
 * ibmvscsi_resume: Resume from suspend
 * @dev:	device struct
 *
 * We may have lost an interrupt across suspend/resume, so kick the
 * interrupt handler
 */
static int ibmvscsi_resume(struct device *dev)
{
	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
	return ibmvscsi_ops->resume(hostdata);
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree
 * we support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static struct dev_pm_ops ibmvscsi_pm_ops = {
	.resume = ibmvscsi_resume
};

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.get_desired_dma = ibmvscsi_get_desired_dma,
	.driver = {
		.name = "ibmvscsi",
		.owner = THIS_MODULE,
		.pm = &ibmvscsi_pm_ops,
	}
};

static struct srp_function_template ibmvscsi_transport_functions = {
};

int __init ibmvscsi_module_init(void)
{
	int ret;

	/* Ensure we have two requests to do error recovery */
	driver_template.can_queue = max_requests;
	max_events = max_requests + 2;
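
	/* The two extra events reserved here line up with the "last two
	 * slots" check in ibmvscsi_send_srp_event(): they keep abort and
	 * reset task-management requests from being starved by normal I/O.
	 */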

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		ibmvscsi_ops = &iseriesvscsi_ops;
	else if (firmware_has_feature(FW_FEATURE_VIO))
		ibmvscsi_ops = &rpavscsi_ops;
	else
		return -ENODEV;

	ibmvscsi_transport_template =
		srp_attach_transport(&ibmvscsi_transport_functions);
	if (!ibmvscsi_transport_template)
		return -ENOMEM;

	ret = vio_register_driver(&ibmvscsi_driver);
	if (ret)
		srp_release_transport(ibmvscsi_transport_template);
	return ret;
}

void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
	srp_release_transport(ibmvscsi_transport_template);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);