1 /* ------------------------------------------------------------
2  * ibmvscsi.c
3  * (C) Copyright IBM Corporation 1994, 2004
4  * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
5  *          Santiago Leon (santil@us.ibm.com)
6  *          Dave Boutcher (sleddog@us.ibm.com)
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
21  * USA
22  *
23  * ------------------------------------------------------------
24  * Emulation of a SCSI host adapter for Virtual I/O devices
25  *
26  * This driver supports the SCSI adapter implemented by the IBM
27  * Power5 firmware.  That SCSI adapter is not a physical adapter,
28  * but allows Linux SCSI peripheral drivers to directly
29  * access devices in another logical partition on the physical system.
30  *
31  * The virtual adapter(s) are present in the open firmware device
32  * tree just like real adapters.
33  *
34  * One of the capabilities provided on these systems is the ability
35  * to DMA between partitions.  The architecture states that for VSCSI,
36  * the server side is allowed to DMA to and from the client.  The client
37  * is never trusted to DMA to or from the server directly.
38  *
39  * Messages are sent between partitions on a "Command/Response Queue" 
40  * (CRQ), which is just a buffer of 16 byte entries in the receiver's
41  * memory.  Senders cannot access the buffer directly, but send messages by
42  * making a hypervisor call and passing in the 16 bytes.  The hypervisor
43  * puts the message in the next 16 byte space in round-robin fashion,
44  * turns on the high order bit of the message (the valid bit), and 
45  * generates an interrupt to the receiver (if interrupts are turned on.) 
46  * The receiver just turns off the valid bit when they have copied out
47  * the message.
48  *
49  * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
50  * (IU) (as defined in the T10 standard available at www.t10.org), gets 
51  * a DMA address for the message, and sends it to the server as the
52  * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
53  * including doing any additional data transfers.  When it is done, it
54  * DMAs the SRP response back to the same address as the request came from,
55  * and sends a CRQ message back to inform the client that the request has
56  * completed.
57  *
58  * Note that some of the underlying infrastructure is different between
59  * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
60  * the older iSeries hypervisor models.  To support both, some low level
61  * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
62  * The Makefile should pick one, not two, not zero, of these.
63  *
64  * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
65  * interfaces.  It would be really nice to abstract this above an RDMA
66  * layer.
67  */
68
69 #include <linux/module.h>
70 #include <linux/moduleparam.h>
71 #include <linux/dma-mapping.h>
72 #include <linux/delay.h>
73 #include <asm/vio.h>
74 #include <scsi/scsi.h>
75 #include <scsi/scsi_cmnd.h>
76 #include <scsi/scsi_host.h>
77 #include <scsi/scsi_device.h>
78 #include "ibmvscsi.h"
79
80 /* The values below are somewhat arbitrary default values, but 
81  * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
82  * Note that there are 3 bits of channel value, 6 bits of id, and
83  * 5 bits of LUN.
84  */
85 static int max_id = 64;
86 static int max_channel = 3;
87 static int init_timeout = 5;
88 static int max_requests = 50;
89
90 #define IBMVSCSI_VERSION "1.5.8"
91
92 MODULE_DESCRIPTION("IBM Virtual SCSI");
93 MODULE_AUTHOR("Dave Boutcher");
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(IBMVSCSI_VERSION);
96
97 module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
98 MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
99 module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
100 MODULE_PARM_DESC(max_channel, "Largest channel value");
101 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
102 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
103 module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
104 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
105
106 /* ------------------------------------------------------------
107  * Routines for the event pool and event structs
108  */
109 /**
110  * initialize_event_pool: - Allocates and initializes the event pool for a host
111  * @pool:       event_pool to be initialized
112  * @size:       Number of events in pool
113  * @hostdata:   ibmvscsi_host_data who owns the event pool
114  *
115  * Returns zero on success.
116 */
117 static int initialize_event_pool(struct event_pool *pool,
118                                  int size, struct ibmvscsi_host_data *hostdata)
119 {
120         int i;
121
122         pool->size = size;
123         pool->next = 0;
124         pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL);
125         if (!pool->events)
126                 return -ENOMEM;
127         memset(pool->events, 0x00, pool->size * sizeof(*pool->events));
128
129         pool->iu_storage =
130             dma_alloc_coherent(hostdata->dev,
131                                pool->size * sizeof(*pool->iu_storage),
132                                &pool->iu_token, 0);
133         if (!pool->iu_storage) {
134                 kfree(pool->events);
135                 return -ENOMEM;
136         }
137
138         for (i = 0; i < pool->size; ++i) {
139                 struct srp_event_struct *evt = &pool->events[i];
140                 memset(&evt->crq, 0x00, sizeof(evt->crq));
141                 atomic_set(&evt->free, 1);
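                /* Pre-build the fixed parts of this event's CRQ entry: the
                 * valid bit plus the DMA address and length of its transfer
                 * IU within the coherent iu_storage buffer.
                 */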
142                 evt->crq.valid = 0x80;
143                 evt->crq.IU_length = sizeof(*evt->xfer_iu);
144                 evt->crq.IU_data_ptr = pool->iu_token + 
145                         sizeof(*evt->xfer_iu) * i;
146                 evt->xfer_iu = pool->iu_storage + i;
147                 evt->hostdata = hostdata;
148                 evt->ext_list = NULL;
149                 evt->ext_list_token = 0;
150         }
151
152         return 0;
153 }
154
155 /**
156  * release_event_pool: - Frees memory of an event pool of a host
157  * @pool:       event_pool to be released
158  * @hostdata:   ibmvscsi_host_data who owns the event pool
159  *
161 */
162 static void release_event_pool(struct event_pool *pool,
163                                struct ibmvscsi_host_data *hostdata)
164 {
165         int i, in_use = 0;
166         for (i = 0; i < pool->size; ++i) {
167                 if (atomic_read(&pool->events[i].free) != 1)
168                         ++in_use;
169                 if (pool->events[i].ext_list) {
170                         dma_free_coherent(hostdata->dev,
171                                   SG_ALL * sizeof(struct srp_direct_buf),
172                                   pool->events[i].ext_list,
173                                   pool->events[i].ext_list_token);
174                 }
175         }
176         if (in_use)
177                 printk(KERN_WARNING
178                        "ibmvscsi: releasing event pool with %d "
179                        "events still in use?\n", in_use);
180         kfree(pool->events);
181         dma_free_coherent(hostdata->dev,
182                           pool->size * sizeof(*pool->iu_storage),
183                           pool->iu_storage, pool->iu_token);
184 }
185
186 /**
187  * valid_event_struct: - Determines if event is valid.
188  * @pool:       event_pool that contains the event
189  * @evt:        srp_event_struct to be checked for validity
190  *
191  * Returns zero if event is invalid, one otherwise.
192 */
193 static int valid_event_struct(struct event_pool *pool,
194                                 struct srp_event_struct *evt)
195 {
196         int index = evt - pool->events;
197         if (index < 0 || index >= pool->size)   /* outside of bounds */
198                 return 0;
199         if (evt != pool->events + index)        /* unaligned */
200                 return 0;
201         return 1;
202 }
203
204 /**
205  * free_event_struct: - Changes status of event to "free"
206  * @pool:       event_pool that contains the event
207  * @evt:        srp_event_struct to be modified
208  *
209 */
210 static void free_event_struct(struct event_pool *pool,
211                                        struct srp_event_struct *evt)
212 {
213         if (!valid_event_struct(pool, evt)) {
214                 printk(KERN_ERR
215                        "ibmvscsi: Freeing invalid event_struct %p "
216                        "(not in pool %p)\n", evt, pool->events);
217                 return;
218         }
219         if (atomic_inc_return(&evt->free) != 1) {
220                 printk(KERN_ERR
221                        "ibmvscsi: Freeing event_struct %p "
222                        "which is not in use!\n", evt);
223                 return;
224         }
225 }
226
227 /**
228  * get_event_struct: - Gets the next free event in pool
229  * @pool:       event_pool that contains the events to be searched
230  *
231  * Returns the next event in "free" state, and NULL if none are free.
232  * Note that no synchronization is done here, we assume the host_lock
233  * will synchronize things.
234 */
235 static struct srp_event_struct *get_event_struct(struct event_pool *pool)
236 {
237         int i;
238         int poolsize = pool->size;
239         int offset = pool->next;
240
241         for (i = 0; i < poolsize; i++) {
242                 offset = (offset + 1) % poolsize;
243                 if (!atomic_dec_if_positive(&pool->events[offset].free)) {
244                         pool->next = offset;
245                         return &pool->events[offset];
246                 }
247         }
248
249         printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
250         return NULL;
251 }
252
253 /**
254  * init_event_struct: Initialize fields in an event struct that are always 
255  *                    required.
256  * @evt:        The event
257  * @done:       Routine to call when the event is responded to
258  * @format:     SRP or MAD format
259  * @timeout:    timeout value set in the CRQ
260  */
261 static void init_event_struct(struct srp_event_struct *evt_struct,
262                               void (*done) (struct srp_event_struct *),
263                               u8 format,
264                               int timeout)
265 {
266         evt_struct->cmnd = NULL;
267         evt_struct->cmnd_done = NULL;
268         evt_struct->sync_srp = NULL;
269         evt_struct->crq.format = format;
270         evt_struct->crq.timeout = timeout;
271         evt_struct->done = done;
272 }
273
274 /* ------------------------------------------------------------
275  * Routines for receiving SCSI responses from the hosting partition
276  */
277
278 /**
279  * set_srp_direction: Set the fields in the srp related to data
280  *     direction and number of buffers based on the direction in
281  *     the scsi_cmnd and the number of buffers
282  */
283 static void set_srp_direction(struct scsi_cmnd *cmd,
284                               struct srp_cmd *srp_cmd, 
285                               int numbuf)
286 {
287         u8 fmt;
288
289         if (numbuf == 0)
290                 return;
291         
292         if (numbuf == 1)
293                 fmt = SRP_DATA_DESC_DIRECT;
294         else {
295                 fmt = SRP_DATA_DESC_INDIRECT;
296                 numbuf = min(numbuf, MAX_INDIRECT_BUFS);
297
298                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
299                         srp_cmd->data_out_desc_cnt = numbuf;
300                 else
301                         srp_cmd->data_in_desc_cnt = numbuf;
302         }
303
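        /* The high nibble of buf_fmt describes the data-out buffer format,
         * the low nibble the data-in buffer format (see unmap_cmd_data()).
         */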
304         if (cmd->sc_data_direction == DMA_TO_DEVICE)
305                 srp_cmd->buf_fmt = fmt << 4;
306         else
307                 srp_cmd->buf_fmt = fmt;
308 }
309
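/**
 * unmap_sg_list: - Unmaps a list of previously mapped direct data buffers
 * @num_entries: number of srp_direct_buf descriptors to unmap
 * @dev:         device for which the memory was mapped
 * @md:          array of memory descriptors filled in by map_sg_list()
 */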
310 static void unmap_sg_list(int num_entries,
311                 struct device *dev,
312                 struct srp_direct_buf *md)
313 {
314         int i;
315
316         for (i = 0; i < num_entries; ++i)
317                 dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
318 }
319
320 /**
321  * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
322  * @cmd:        srp_cmd whose additional_data member will be unmapped
323  * @dev:        device for which the memory is mapped
324  *
325 */
326 static void unmap_cmd_data(struct srp_cmd *cmd,
327                            struct srp_event_struct *evt_struct,
328                            struct device *dev)
329 {
330         u8 out_fmt, in_fmt;
331
332         out_fmt = cmd->buf_fmt >> 4;
333         in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
334
335         if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
336                 return;
337         else if (out_fmt == SRP_DATA_DESC_DIRECT ||
338                  in_fmt == SRP_DATA_DESC_DIRECT) {
339                 struct srp_direct_buf *data =
340                         (struct srp_direct_buf *) cmd->add_data;
341                 dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
342         } else {
343                 struct srp_indirect_buf *indirect =
344                         (struct srp_indirect_buf *) cmd->add_data;
345                 int num_mapped = indirect->table_desc.len /
346                         sizeof(struct srp_direct_buf);
347
348                 if (num_mapped <= MAX_INDIRECT_BUFS) {
349                         unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
350                         return;
351                 }
352
353                 unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
354         }
355 }
356
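/**
 * map_sg_list: - Fills in direct descriptors for an already-mapped scatterlist
 * @num_entries: number of mapped scatterlist entries
 * @sg:          scatterlist mapped with dma_map_sg()
 * @md:          array of srp_direct_buf descriptors to fill in
 *
 * Returns the total number of bytes described by the descriptors.
 */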
357 static u64 map_sg_list(
358                        struct scatterlist *sg,
359                        struct srp_direct_buf *md)
360 {
361         int i;
362         u64 total_length = 0;
363
364         for (i = 0; i < num_entries; ++i) {
365                 struct srp_direct_buf *descr = md + i;
366                 struct scatterlist *sg_entry = &sg[i];
367                 descr->va = sg_dma_address(sg_entry);
368                 descr->len = sg_dma_len(sg_entry);
369                 descr->key = 0;
370                 total_length += sg_dma_len(sg_entry);
371         }
372         return total_length;
373 }
374
375 /**
376  * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
377  * @cmd:        Scsi_Cmnd with the scatterlist
378  * @srp_cmd:    srp_cmd that contains the memory descriptor
379  * @dev:        device for which to map dma memory
380  *
381  * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
382  * Returns 1 on success.
383 */
384 static int map_sg_data(struct scsi_cmnd *cmd,
385                        struct srp_event_struct *evt_struct,
386                        struct srp_cmd *srp_cmd, struct device *dev)
387 {
388
389         int sg_mapped;
390         u64 total_length = 0;
391         struct scatterlist *sg = cmd->request_buffer;
392         struct srp_direct_buf *data =
393                 (struct srp_direct_buf *) srp_cmd->add_data;
394         struct srp_indirect_buf *indirect =
395                 (struct srp_indirect_buf *) data;
396
397         sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
398
399         if (sg_mapped == 0)
400                 return 0;
401
402         set_srp_direction(cmd, srp_cmd, sg_mapped);
403
404         /* special case; we can use a single direct descriptor */
405         if (sg_mapped == 1) {
406                 data->va = sg_dma_address(&sg[0]);
407                 data->len = sg_dma_len(&sg[0]);
408                 data->key = 0;
409                 return 1;
410         }
411
412         if (sg_mapped > SG_ALL) {
413                 printk(KERN_ERR
414                        "ibmvscsi: More than %d mapped sg entries, got %d\n",
415                        SG_ALL, sg_mapped);
416                 return 0;
417         }
418
419         indirect->table_desc.va = 0;
420         indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
421         indirect->table_desc.key = 0;
422
423         if (sg_mapped <= MAX_INDIRECT_BUFS) {
424                 total_length = map_sg_list(sg_mapped, sg,
425                                            &indirect->desc_list[0]);
426                 indirect->len = total_length;
427                 return 1;
428         }
429
430         /* get indirect table */
431         if (!evt_struct->ext_list) {
432                 evt_struct->ext_list = (struct srp_direct_buf *)
433                         dma_alloc_coherent(dev, 
434                                            SG_ALL * sizeof(struct srp_direct_buf),
435                                            &evt_struct->ext_list_token, 0);
436                 if (!evt_struct->ext_list) {
437                         printk(KERN_ERR
438                                "ibmvscsi: Can't allocate memory for indirect table\n");
439                         return 0;
440                         
441                 }
442         }
443
444         total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);        
445
446         indirect->len = total_length;
447         indirect->table_desc.va = evt_struct->ext_list_token;
448         indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
449         memcpy(indirect->desc_list, evt_struct->ext_list,
450                MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
451         
452         return 1;
453 }
454
455 /**
456  * map_single_data: - Maps memory and initializes memory descriptor fields
457  * @cmd:        struct scsi_cmnd with the memory to be mapped
458  * @srp_cmd:    srp_cmd that contains the memory descriptor
459  * @dev:        device for which to map dma memory
460  *
461  * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
462  * Returns 1 on success.
463 */
464 static int map_single_data(struct scsi_cmnd *cmd,
465                            struct srp_cmd *srp_cmd, struct device *dev)
466 {
467         struct srp_direct_buf *data =
468                 (struct srp_direct_buf *) srp_cmd->add_data;
469
470         data->va =
471                 dma_map_single(dev, cmd->request_buffer,
472                                cmd->request_bufflen,
473                                DMA_BIDIRECTIONAL);
474         if (dma_mapping_error(data->va)) {
475                 printk(KERN_ERR
476                        "ibmvscsi: Unable to map request_buffer for command!\n");
477                 return 0;
478         }
479         data->len = cmd->request_bufflen;
480         data->key = 0;
481
482         set_srp_direction(cmd, srp_cmd, 1);
483
484         return 1;
485 }
486
487 /**
488  * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
489  * @cmd:        struct scsi_cmnd with the memory to be mapped
490  * @srp_cmd:    srp_cmd that contains the memory descriptor
491  * @dev:        dma device for which to map dma memory
492  *
493  * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds 
494  * Returns 1 on success.
495 */
496 static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
497                                 struct srp_event_struct *evt_struct,
498                                 struct srp_cmd *srp_cmd, struct device *dev)
499 {
500         switch (cmd->sc_data_direction) {
501         case DMA_FROM_DEVICE:
502         case DMA_TO_DEVICE:
503                 break;
504         case DMA_NONE:
505                 return 1;
506         case DMA_BIDIRECTIONAL:
507                 printk(KERN_ERR
508                        "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
509                 return 0;
510         default:
511                 printk(KERN_ERR
512                        "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
513                        cmd->sc_data_direction);
514                 return 0;
515         }
516
517         if (!cmd->request_buffer)
518                 return 1;
519         if (cmd->use_sg)
520                 return map_sg_data(cmd, evt_struct, srp_cmd, dev);
521         return map_single_data(cmd, srp_cmd, dev);
522 }
523
524 /* ------------------------------------------------------------
525  * Routines for sending and receiving SRPs
526  */
527 /**
528  * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
529  * @evt_struct: evt_struct to be sent
530  * @hostdata:   ibmvscsi_host_data of host
531  *
532  * Returns zero on success, or SCSI_MLQUEUE_HOST_BUSY if the event could not be sent.
533  * Note that this routine assumes that host_lock is held for synchronization
534 */
535 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
536                                    struct ibmvscsi_host_data *hostdata)
537 {
538         u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
539         int rc;
540
541         /* If we have exhausted our request limit, just fail this request.
542          * Note that there are rare cases involving driver generated requests 
543          * (such as task management requests) that the mid layer may think we
544          * can handle more requests (can_queue) when we actually can't
545          */
546         if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
547             (atomic_dec_if_positive(&hostdata->request_limit) < 0))
548                 goto send_error;
549
550         /* Copy the IU into the transfer area */
551         *evt_struct->xfer_iu = evt_struct->iu;
552         evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
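        /* The tag is the address of the evt_struct itself; the hosting
         * partition hands it back in the response CRQ, which is how
         * ibmvscsi_handle_crq() finds this event again.
         */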
553
554         /* Add this to the sent list.  We need to do this 
555          * before we actually send 
556          * in case it comes back REALLY fast
557          */
558         list_add_tail(&evt_struct->list, &hostdata->sent);
559
560         if ((rc =
561              ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
562                 list_del(&evt_struct->list);
563
564                 printk(KERN_ERR "ibmvscsi: send error %d\n",
565                        rc);
566                 goto send_error;
567         }
568
569         return 0;
570
571  send_error:
572         unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
573
574         free_event_struct(&hostdata->pool, evt_struct);
575         return SCSI_MLQUEUE_HOST_BUSY;
576 }
577
578 /**
579  * handle_cmd_rsp: -  Handle responses from commands
580  * @evt_struct: srp_event_struct to be handled
581  *
582  * Used as a callback when sending scsi cmds.
583  * Gets called by ibmvscsi_handle_crq()
584 */
585 static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
586 {
587         struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
588         struct scsi_cmnd *cmnd = evt_struct->cmnd;
589
590         if (unlikely(rsp->opcode != SRP_RSP)) {
591                 if (printk_ratelimit())
592                         printk(KERN_WARNING 
593                                "ibmvscsi: bad SRP RSP type %d\n",
594                                rsp->opcode);
595         }
596         
597         if (cmnd) {
598                 cmnd->result = rsp->status;
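                /* Open-coded status_byte(): on CHECK CONDITION, copy the
                 * sense data returned in the SRP response.
                 */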
599                 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
600                         memcpy(cmnd->sense_buffer,
601                                rsp->data,
602                                rsp->sense_data_len);
603                 unmap_cmd_data(&evt_struct->iu.srp.cmd, 
604                                evt_struct, 
605                                evt_struct->hostdata->dev);
606
607                 if (rsp->flags & SRP_RSP_FLAG_DOOVER)
608                         cmnd->resid = rsp->data_out_res_cnt;
609                 else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
610                         cmnd->resid = rsp->data_in_res_cnt;
611         }
612
613         if (evt_struct->cmnd_done)
614                 evt_struct->cmnd_done(cmnd);
615 }
616
617 /**
618  * lun_from_dev: - Returns the lun of the scsi device
619  * @dev:        struct scsi_device
620  *
621 */
622 static inline u16 lun_from_dev(struct scsi_device *dev)
623 {
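        /* Build a 16 bit SRP LUN: 0x2 in the top two bits selects the SAM
         * logical unit addressing method, followed by 6 bits of target id,
         * 3 bits of channel and 5 bits of LUN (see the comment near the
         * module parameters above).
         */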
624         return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
625 }
626
627 /**
628  * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
629  * @cmd:        struct scsi_cmnd to be executed
630  * @done:       Callback function to be called when cmd is completed
631 */
632 static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
633                                  void (*done) (struct scsi_cmnd *))
634 {
635         struct srp_cmd *srp_cmd;
636         struct srp_event_struct *evt_struct;
637         struct srp_indirect_buf *indirect;
638         struct ibmvscsi_host_data *hostdata =
639                 (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
640         u16 lun = lun_from_dev(cmnd->device);
641         u8 out_fmt, in_fmt;
642
643         evt_struct = get_event_struct(&hostdata->pool);
644         if (!evt_struct)
645                 return SCSI_MLQUEUE_HOST_BUSY;
646
647         /* Set up the actual SRP IU */
648         srp_cmd = &evt_struct->iu.srp.cmd;
649         memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
650         srp_cmd->opcode = SRP_CMD;
651         memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
652         srp_cmd->lun = ((u64) lun) << 48;
653
654         if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
655                 printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
656                 free_event_struct(&hostdata->pool, evt_struct);
657                 return SCSI_MLQUEUE_HOST_BUSY;
658         }
659
660         init_event_struct(evt_struct,
661                           handle_cmd_rsp,
662                           VIOSRP_SRP_FORMAT,
663                           cmnd->timeout_per_command/HZ);
664
665         evt_struct->cmnd = cmnd;
666         evt_struct->cmnd_done = done;
667
668         /* Fix up dma address of the buffer itself */
669         indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
670         out_fmt = srp_cmd->buf_fmt >> 4;
671         in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
672         if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
673              out_fmt == SRP_DATA_DESC_INDIRECT) &&
674             indirect->table_desc.va == 0) {
675                 indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
676                         offsetof(struct srp_cmd, add_data) +
677                         offsetof(struct srp_indirect_buf, desc_list);
678         }
679
680         return ibmvscsi_send_srp_event(evt_struct, hostdata);
681 }
682
683 /* ------------------------------------------------------------
684  * Routines for driver initialization
685  */
686 /**
687  * adapter_info_rsp: - Handle response to MAD adapter info request
688  * @evt_struct: srp_event_struct with the response
689  *
690  * Used as a "done" callback when sending adapter_info. Gets called
691  * by ibmvscsi_handle_crq()
692 */
693 static void adapter_info_rsp(struct srp_event_struct *evt_struct)
694 {
695         struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
696         dma_unmap_single(hostdata->dev,
697                          evt_struct->iu.mad.adapter_info.buffer,
698                          evt_struct->iu.mad.adapter_info.common.length,
699                          DMA_BIDIRECTIONAL);
700
701         if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
702                 printk("ibmvscsi: error %d getting adapter info\n",
703                        evt_struct->xfer_iu->mad.adapter_info.common.status);
704         } else {
705                 printk("ibmvscsi: host srp version: %s, "
706                        "host partition %s (%d), OS %d, max io %u\n",
707                        hostdata->madapter_info.srp_version,
708                        hostdata->madapter_info.partition_name,
709                        hostdata->madapter_info.partition_number,
710                        hostdata->madapter_info.os_type,
711                        hostdata->madapter_info.port_max_txu[0]);
712                 
713                 if (hostdata->madapter_info.port_max_txu[0]) 
714                         hostdata->host->max_sectors = 
715                                 hostdata->madapter_info.port_max_txu[0] >> 9;
716                 
717                 if (hostdata->madapter_info.os_type == 3 &&
718                     strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
719                         printk("ibmvscsi: host (Ver. %s) doesn't support large "
720                                "transfers\n",
721                                hostdata->madapter_info.srp_version);
722                         printk("ibmvscsi: limiting scatterlists to %d\n",
723                                MAX_INDIRECT_BUFS);
724                         hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
725                 }
726         }
727 }
728
729 /**
730  * send_mad_adapter_info: - Sends the mad adapter info request
731  *      and stores the result so it can be retrieved with
732  *      sysfs.  We COULD consider causing a failure if the
733  *      returned SRP version doesn't match ours.
734  * @hostdata:   ibmvscsi_host_data of host
735  * 
737 */
738 static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
739 {
740         struct viosrp_adapter_info *req;
741         struct srp_event_struct *evt_struct;
742         
743         evt_struct = get_event_struct(&hostdata->pool);
744         if (!evt_struct) {
745                 printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
746                        "for ADAPTER_INFO_REQ!\n");
747                 return;
748         }
749
750         init_event_struct(evt_struct,
751                           adapter_info_rsp,
752                           VIOSRP_MAD_FORMAT,
753                           init_timeout * HZ);
754         
755         req = &evt_struct->iu.mad.adapter_info;
756         memset(req, 0x00, sizeof(*req));
757         
758         req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
759         req->common.length = sizeof(hostdata->madapter_info);
760         req->buffer = dma_map_single(hostdata->dev,
761                                      &hostdata->madapter_info,
762                                      sizeof(hostdata->madapter_info),
763                                      DMA_BIDIRECTIONAL);
764
765         if (dma_mapping_error(req->buffer)) {
766                 printk(KERN_ERR
767                        "ibmvscsi: Unable to map request_buffer "
768                        "for adapter_info!\n");
769                 free_event_struct(&hostdata->pool, evt_struct);
770                 return;
771         }
772         
773         if (ibmvscsi_send_srp_event(evt_struct, hostdata))
774                 printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
775 }
776
777 /**
778  * login_rsp: - Handle response to SRP login request
779  * @evt_struct: srp_event_struct with the response
780  *
781  * Used as a "done" callback when sending srp_login. Gets called
782  * by ibmvscsi_handle_crq()
783 */
784 static void login_rsp(struct srp_event_struct *evt_struct)
785 {
786         struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
787         switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
788         case SRP_LOGIN_RSP:     /* it worked! */
789                 break;
790         case SRP_LOGIN_REJ:     /* refused! */
791                 printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
792                        evt_struct->xfer_iu->srp.login_rej.reason);
793                 /* Login failed.  */
794                 atomic_set(&hostdata->request_limit, -1);
795                 return;
796         default:
797                 printk(KERN_ERR
798                        "ibmvscsi: Invalid login response typecode 0x%02x!\n",
799                        evt_struct->xfer_iu->srp.login_rsp.opcode);
800                 /* Login failed.  */
801                 atomic_set(&hostdata->request_limit, -1);
802                 return;
803         }
804
805         printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
806
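        /* Cap the server's advertised credit at max_requests - 2, presumably
         * to leave headroom for driver-generated requests such as task
         * management functions and MADs.
         */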
807         if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
808             (max_requests - 2))
809                 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
810                     max_requests - 2;
811
812         /* Now we know what the real request-limit is */
813         atomic_set(&hostdata->request_limit,
814                    evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
815
816         hostdata->host->can_queue =
817             evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
818
819         if (hostdata->host->can_queue < 1) {
820                 printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
821                 return;
822         }
823
824         /* If we had any pending I/Os, kick them */
825         scsi_unblock_requests(hostdata->host);
826
827         send_mad_adapter_info(hostdata);
828         return;
829 }
830
831 /**
832  * send_srp_login: - Sends the srp login
833  * @hostdata:   ibmvscsi_host_data of host
834  * 
835  * Returns zero if successful.
836 */
837 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
838 {
839         int rc;
840         unsigned long flags;
841         struct srp_login_req *login;
842         struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
843         if (!evt_struct) {
844                 printk(KERN_ERR
845                        "ibmvscsi: couldn't allocate an event for login req!\n");
846                 return FAILED;
847         }
848
849         init_event_struct(evt_struct,
850                           login_rsp,
851                           VIOSRP_SRP_FORMAT,
852                           init_timeout * HZ);
853
854         login = &evt_struct->iu.srp.login_req;
855         memset(login, 0x00, sizeof(struct srp_login_req));
856         login->opcode = SRP_LOGIN_REQ;
857         login->req_it_iu_len = sizeof(union srp_iu);
858         login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
859         
860         spin_lock_irqsave(hostdata->host->host_lock, flags);
861         /* Start out with a request limit of 1, since this is negotiated in
862          * the login request we are just sending
863          */
864         atomic_set(&hostdata->request_limit, 1);
865
866         rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
867         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
868         printk(KERN_INFO "ibmvscsi: sent SRP login\n");
869         return rc;
870 }
871
872 /**
873  * sync_completion: Signal that a synchronous command has completed
874  * Note that after returning from this call, the evt_struct is freed.
875  * the caller waiting on this completion shouldn't touch the evt_struct
876  * again.
877  */
878 static void sync_completion(struct srp_event_struct *evt_struct)
879 {
880         /* copy the response back */
881         if (evt_struct->sync_srp)
882                 *evt_struct->sync_srp = *evt_struct->xfer_iu;
883         
884         complete(&evt_struct->comp);
885 }
886
887 /**
888  * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
889  * send this over to the server and wait synchronously for the response
890  */
891 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
892 {
893         struct ibmvscsi_host_data *hostdata =
894             (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
895         struct srp_tsk_mgmt *tsk_mgmt;
896         struct srp_event_struct *evt;
897         struct srp_event_struct *tmp_evt, *found_evt;
898         union viosrp_iu srp_rsp;
899         int rsp_rc;
900         unsigned long flags;
901         u16 lun = lun_from_dev(cmd->device);
902
903         /* First, find this command in our sent list so we can figure
904          * out the correct tag
905          */
906         spin_lock_irqsave(hostdata->host->host_lock, flags);
907         found_evt = NULL;
908         list_for_each_entry(tmp_evt, &hostdata->sent, list) {
909                 if (tmp_evt->cmnd == cmd) {
910                         found_evt = tmp_evt;
911                         break;
912                 }
913         }
914
915         if (!found_evt) {
916                 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
917                 return FAILED;
918         }
919
920         evt = get_event_struct(&hostdata->pool);
921         if (evt == NULL) {
922                 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
923                 printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
924                 return FAILED;
925         }
926         
927         init_event_struct(evt,
928                           sync_completion,
929                           VIOSRP_SRP_FORMAT,
930                           init_timeout * HZ);
931
932         tsk_mgmt = &evt->iu.srp.tsk_mgmt;
933         
934         /* Set up an abort SRP command */
935         memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
936         tsk_mgmt->opcode = SRP_TSK_MGMT;
937         tsk_mgmt->lun = ((u64) lun) << 48;
938         tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
939         tsk_mgmt->task_tag = (u64) found_evt;
940
941         printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
942                tsk_mgmt->lun, tsk_mgmt->task_tag);
943
944         evt->sync_srp = &srp_rsp;
945         init_completion(&evt->comp);
946         rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
947         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
948         if (rsp_rc != 0) {
949                 printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
950                 return FAILED;
951         }
952
953         wait_for_completion(&evt->comp);
954
955         /* make sure we got a good response */
956         if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
957                 if (printk_ratelimit())
958                         printk(KERN_WARNING 
959                                "ibmvscsi: abort bad SRP RSP type %d\n",
960                                srp_rsp.srp.rsp.opcode);
961                 return FAILED;
962         }
963
964         if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
965                 rsp_rc = *((int *)srp_rsp.srp.rsp.data);
966         else
967                 rsp_rc = srp_rsp.srp.rsp.status;
968
969         if (rsp_rc) {
970                 if (printk_ratelimit())
971                         printk(KERN_WARNING 
972                                "ibmvscsi: abort code %d for task tag 0x%lx\n",
973                                rsp_rc,
974                                tsk_mgmt->task_tag);
975                 return FAILED;
976         }
977
978         /* Because we dropped the spinlock above, it's possible
979          * The event is no longer in our list.  Make sure it didn't
980          * complete while we were aborting
981          */
982         spin_lock_irqsave(hostdata->host->host_lock, flags);
983         found_evt = NULL;
984         list_for_each_entry(tmp_evt, &hostdata->sent, list) {
985                 if (tmp_evt->cmnd == cmd) {
986                         found_evt = tmp_evt;
987                         break;
988                 }
989         }
990
991         if (found_evt == NULL) {
992                 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
993                 printk(KERN_INFO
994                        "ibmvscsi: aborted task tag 0x%lx completed\n",
995                        tsk_mgmt->task_tag);
996                 return SUCCESS;
997         }
998
999         printk(KERN_INFO
1000                "ibmvscsi: successfully aborted task tag 0x%lx\n",
1001                tsk_mgmt->task_tag);
1002
1003         cmd->result = (DID_ABORT << 16);
1004         list_del(&found_evt->list);
1005         unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
1006                        found_evt->hostdata->dev);
1007         free_event_struct(&found_evt->hostdata->pool, found_evt);
1008         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1009         atomic_inc(&hostdata->request_limit);
1010         return SUCCESS;
1011 }
1012
1013 /**
1014  * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host 
1015  * template send this over to the server and wait synchronously for the 
1016  * response
1017  */
1018 static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1019 {
1020         struct ibmvscsi_host_data *hostdata =
1021             (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
1022
1023         struct srp_tsk_mgmt *tsk_mgmt;
1024         struct srp_event_struct *evt;
1025         struct srp_event_struct *tmp_evt, *pos;
1026         union viosrp_iu srp_rsp;
1027         int rsp_rc;
1028         unsigned long flags;
1029         u16 lun = lun_from_dev(cmd->device);
1030
1031         spin_lock_irqsave(hostdata->host->host_lock, flags);
1032         evt = get_event_struct(&hostdata->pool);
1033         if (evt == NULL) {
1034                 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1035                 printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
1036                 return FAILED;
1037         }
1038         
1039         init_event_struct(evt,
1040                           sync_completion,
1041                           VIOSRP_SRP_FORMAT,
1042                           init_timeout * HZ);
1043
1044         tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1045
1046         /* Set up a lun reset SRP command */
1047         memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1048         tsk_mgmt->opcode = SRP_TSK_MGMT;
1049         tsk_mgmt->lun = ((u64) lun) << 48;
1050         tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1051
1052         printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
1053                tsk_mgmt->lun);
1054
1055         evt->sync_srp = &srp_rsp;
1056         init_completion(&evt->comp);
1057         rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
1058         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1059         if (rsp_rc != 0) {
1060                 printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
1061                 return FAILED;
1062         }
1063
1064         wait_for_completion(&evt->comp);
1065
1066         /* make sure we got a good response */
1067         if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1068                 if (printk_ratelimit())
1069                         printk(KERN_WARNING 
1070                                "ibmvscsi: reset bad SRP RSP type %d\n",
1071                                srp_rsp.srp.rsp.opcode);
1072                 return FAILED;
1073         }
1074
1075         if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1076                 rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1077         else
1078                 rsp_rc = srp_rsp.srp.rsp.status;
1079
1080         if (rsp_rc) {
1081                 if (printk_ratelimit())
1082                         printk(KERN_WARNING 
1083                                "ibmvscsi: reset code %d for task tag 0x%lx\n",
1084                                rsp_rc, tsk_mgmt->task_tag);
1085                 return FAILED;
1086         }
1087
1088         /* We need to find all commands for this LUN that have not yet been
1089          * responded to, and fail them with DID_RESET
1090          */
1091         spin_lock_irqsave(hostdata->host->host_lock, flags);
1092         list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1093                 if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
1094                         if (tmp_evt->cmnd)
1095                                 tmp_evt->cmnd->result = (DID_RESET << 16);
1096                         list_del(&tmp_evt->list);
1097                         unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
1098                                        tmp_evt->hostdata->dev);
1099                         free_event_struct(&tmp_evt->hostdata->pool,
1100                                                    tmp_evt);
1101                         atomic_inc(&hostdata->request_limit);
1102                         if (tmp_evt->cmnd_done)
1103                                 tmp_evt->cmnd_done(tmp_evt->cmnd);
1104                         else if (tmp_evt->done)
1105                                 tmp_evt->done(tmp_evt);
1106                 }
1107         }
1108         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1109         return SUCCESS;
1110 }
1111
1112 /**
1113  * purge_requests: Our virtual adapter just shut down.  Purge any sent requests
1114  * @hostdata:    the adapter
1115  */
1116 static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
1117 {
1118         struct srp_event_struct *tmp_evt, *pos;
1119         unsigned long flags;
1120
1121         spin_lock_irqsave(hostdata->host->host_lock, flags);
1122         list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1123                 list_del(&tmp_evt->list);
1124                 if (tmp_evt->cmnd) {
1125                         tmp_evt->cmnd->result = (error_code << 16);
1126                         unmap_cmd_data(&tmp_evt->iu.srp.cmd, 
1127                                        tmp_evt, 
1128                                        tmp_evt->hostdata->dev);
1129                         if (tmp_evt->cmnd_done)
1130                                 tmp_evt->cmnd_done(tmp_evt->cmnd);
1131                 } else {
1132                         if (tmp_evt->done) {
1133                                 tmp_evt->done(tmp_evt);
1134                         }
1135                 }
1136                 free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
1137         }
1138         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1139 }
1140
1141 /**
1142  * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
1143  * @crq:        Command/Response queue
1144  * @hostdata:   ibmvscsi_host_data of host
1145  *
1146 */
1147 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1148                          struct ibmvscsi_host_data *hostdata)
1149 {
1150         unsigned long flags;
1151         struct srp_event_struct *evt_struct =
1152             (struct srp_event_struct *)crq->IU_data_ptr;
1153         switch (crq->valid) {
1154         case 0xC0:              /* initialization */
1155                 switch (crq->format) {
1156                 case 0x01:      /* Initialization message */
1157                         printk(KERN_INFO "ibmvscsi: partner initialized\n");
1158                         /* Send back a response */
1159                         if (ibmvscsi_send_crq(hostdata,
1160                                               0xC002000000000000LL, 0) == 0) {
1161                                 /* Now login */
1162                                 send_srp_login(hostdata);
1163                         } else {
1164                                 printk(KERN_ERR
1165                                        "ibmvscsi: Unable to send init rsp\n");
1166                         }
1167
1168                         break;
1169                 case 0x02:      /* Initialization response */
1170                         printk(KERN_INFO
1171                                "ibmvscsi: partner initialization complete\n");
1172
1173                         /* Now login */
1174                         send_srp_login(hostdata);
1175                         break;
1176                 default:
1177                         printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
1178                 }
1179                 return;
1180         case 0xFF:      /* Hypervisor telling us the connection is closed */
1181                 scsi_block_requests(hostdata->host);
1182                 if (crq->format == 0x06) {
1183                         /* We need to re-setup the interpartition connection */
1184                         printk(KERN_INFO
1185                                "ibmvscsi: Re-enabling adapter!\n");
1186                         atomic_set(&hostdata->request_limit, -1);
1187                         purge_requests(hostdata, DID_REQUEUE);
1188                         if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
1189                                                         hostdata) == 0)
1190                                 if (ibmvscsi_send_crq(hostdata,
1191                                                       0xC001000000000000LL, 0))
1192                                         printk(KERN_ERR
1193                                                "ibmvscsi: transmit error after"
1194                                                " enable\n");
1195                 } else {
1196                         printk(KERN_INFO
1197                                "ibmvscsi: Virtual adapter failed rc %d!\n",
1198                                crq->format);
1199
1200                         atomic_set(&hostdata->request_limit, -1);
1201                         purge_requests(hostdata, DID_ERROR);
1202                         ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
1203                 }
1204                 scsi_unblock_requests(hostdata->host);
1205                 return;
1206         case 0x80:              /* real payload */
1207                 break;
1208         default:
1209                 printk(KERN_ERR
1210                        "ibmvscsi: got an invalid message type 0x%02x\n",
1211                        crq->valid);
1212                 return;
1213         }
1214
1215         /* The only kind of payload CRQs we should get are responses to
1216          * things we send. Make sure this response is to something we
1217          * actually sent
1218          */
1219         if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1220                 printk(KERN_ERR
1221                        "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
1222                        (void *)crq->IU_data_ptr);
1223                 return;
1224         }
1225
1226         if (atomic_read(&evt_struct->free)) {
1227                 printk(KERN_ERR
1228                        "ibmvscsi: received duplicate correlation_token 0x%p!\n",
1229                        (void *)crq->IU_data_ptr);
1230                 return;
1231         }
1232
1233         if (crq->format == VIOSRP_SRP_FORMAT)
1234                 atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
1235                            &hostdata->request_limit);
1236
1237         if (evt_struct->done)
1238                 evt_struct->done(evt_struct);
1239         else
1240                 printk(KERN_ERR
1241                        "ibmvscsi: returned done() is NULL; not running it!\n");
1242
1243         /*
1244          * Lock the host_lock before messing with these structures, since we
1245          * are running in a task context
1246          */
1247         spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
1248         list_del(&evt_struct->list);
1249         free_event_struct(&evt_struct->hostdata->pool, evt_struct);
1250         spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
1251 }
1252
1253 /**
1254  * ibmvscsi_do_host_config: Send the command to the server to get host
1255  * configuration data.  The data is opaque to us.
1256  */
1257 static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1258                                    unsigned char *buffer, int length)
1259 {
1260         struct viosrp_host_config *host_config;
1261         struct srp_event_struct *evt_struct;
1262         int rc;
1263
1264         evt_struct = get_event_struct(&hostdata->pool);
1265         if (!evt_struct) {
1266                 printk(KERN_ERR
1267                        "ibmvscsi: couldn't allocate event for HOST_CONFIG!\n");
1268                 return -1;
1269         }
1270
1271         init_event_struct(evt_struct,
1272                           sync_completion,
1273                           VIOSRP_MAD_FORMAT,
1274                           init_timeout * HZ);
1275
1276         host_config = &evt_struct->iu.mad.host_config;
1277
1278         /* Set up a host config MAD request */
1279         memset(host_config, 0x00, sizeof(*host_config));
1280         host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
1281         host_config->common.length = length;
1282         host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
1283                                             DMA_BIDIRECTIONAL);
1284
1285         if (dma_mapping_error(host_config->buffer)) {
1286                 printk(KERN_ERR
1287                        "ibmvscsi: dma_mapping error getting host config\n");
1288                 free_event_struct(&hostdata->pool, evt_struct);
1289                 return -1;
1290         }
1291
1292         init_completion(&evt_struct->comp);
1293         rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
1294         if (rc == 0) {
1295                 wait_for_completion(&evt_struct->comp);
1296                 dma_unmap_single(hostdata->dev, host_config->buffer,
1297                                  length, DMA_BIDIRECTIONAL);
1298         }
1299
1300         return rc;
1301 }
1302
1303 /* ------------------------------------------------------------
1304  * sysfs attributes
1305  */
1306 static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
1307 {
1308         struct Scsi_Host *shost = class_to_shost(class_dev);
1309         struct ibmvscsi_host_data *hostdata =
1310             (struct ibmvscsi_host_data *)shost->hostdata;
1311         int len;
1312
1313         len = snprintf(buf, PAGE_SIZE, "%s\n",
1314                        hostdata->madapter_info.srp_version);
1315         return len;
1316 }
1317
1318 static struct class_device_attribute ibmvscsi_host_srp_version = {
1319         .attr = {
1320                  .name = "srp_version",
1321                  .mode = S_IRUGO,
1322                  },
1323         .show = show_host_srp_version,
1324 };
1325
1326 static ssize_t show_host_partition_name(struct class_device *class_dev,
1327                                         char *buf)
1328 {
1329         struct Scsi_Host *shost = class_to_shost(class_dev);
1330         struct ibmvscsi_host_data *hostdata =
1331             (struct ibmvscsi_host_data *)shost->hostdata;
1332         int len;
1333
1334         len = snprintf(buf, PAGE_SIZE, "%s\n",
1335                        hostdata->madapter_info.partition_name);
1336         return len;
1337 }
1338
1339 static struct class_device_attribute ibmvscsi_host_partition_name = {
1340         .attr = {
1341                  .name = "partition_name",
1342                  .mode = S_IRUGO,
1343                  },
1344         .show = show_host_partition_name,
1345 };
1346
1347 static ssize_t show_host_partition_number(struct class_device *class_dev,
1348                                           char *buf)
1349 {
1350         struct Scsi_Host *shost = class_to_shost(class_dev);
1351         struct ibmvscsi_host_data *hostdata =
1352             (struct ibmvscsi_host_data *)shost->hostdata;
1353         int len;
1354
1355         len = snprintf(buf, PAGE_SIZE, "%d\n",
1356                        hostdata->madapter_info.partition_number);
1357         return len;
1358 }
1359
1360 static struct class_device_attribute ibmvscsi_host_partition_number = {
1361         .attr = {
1362                  .name = "partition_number",
1363                  .mode = S_IRUGO,
1364                  },
1365         .show = show_host_partition_number,
1366 };
1367
1368 static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
1369 {
1370         struct Scsi_Host *shost = class_to_shost(class_dev);
1371         struct ibmvscsi_host_data *hostdata =
1372             (struct ibmvscsi_host_data *)shost->hostdata;
1373         int len;
1374
1375         len = snprintf(buf, PAGE_SIZE, "%d\n",
1376                        hostdata->madapter_info.mad_version);
1377         return len;
1378 }
1379
1380 static struct class_device_attribute ibmvscsi_host_mad_version = {
1381         .attr = {
1382                  .name = "mad_version",
1383                  .mode = S_IRUGO,
1384                  },
1385         .show = show_host_mad_version,
1386 };
1387
1388 static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
1389 {
1390         struct Scsi_Host *shost = class_to_shost(class_dev);
1391         struct ibmvscsi_host_data *hostdata =
1392             (struct ibmvscsi_host_data *)shost->hostdata;
1393         int len;
1394
1395         len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
1396         return len;
1397 }
1398
1399 static struct class_device_attribute ibmvscsi_host_os_type = {
1400         .attr = {
1401                  .name = "os_type",
1402                  .mode = S_IRUGO,
1403                  },
1404         .show = show_host_os_type,
1405 };
1406
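/*
 * Unlike the attributes above, which report values cached in
 * hostdata->madapter_info, reading "config" issues a fresh HOST_CONFIG
 * MAD to the server through ibmvscsi_do_host_config().
 */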
1407 static ssize_t show_host_config(struct class_device *class_dev, char *buf)
1408 {
1409         struct Scsi_Host *shost = class_to_shost(class_dev);
1410         struct ibmvscsi_host_data *hostdata =
1411             (struct ibmvscsi_host_data *)shost->hostdata;
1412
1413         /* returns null-terminated host config data */
1414         if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
1415                 return strlen(buf);
1416         else
1417                 return 0;
1418 }
1419
1420 static struct class_device_attribute ibmvscsi_host_config = {
1421         .attr = {
1422                  .name = "config",
1423                  .mode = S_IRUGO,
1424                  },
1425         .show = show_host_config,
1426 };
1427
1428 static struct class_device_attribute *ibmvscsi_attrs[] = {
1429         &ibmvscsi_host_srp_version,
1430         &ibmvscsi_host_partition_name,
1431         &ibmvscsi_host_partition_number,
1432         &ibmvscsi_host_mad_version,
1433         &ibmvscsi_host_os_type,
1434         &ibmvscsi_host_config,
1435         NULL
1436 };
1437
1438 /* ------------------------------------------------------------
1439  * SCSI driver registration
1440  */
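/*
 * can_queue is intentionally 1 here; it is updated after SRP_LOGIN once
 * the server has reported its request limit (see ibmvscsi_probe, which
 * waits for a valid request_limit before scanning).
 */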
1441 static struct scsi_host_template driver_template = {
1442         .module = THIS_MODULE,
1443         .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
1444         .proc_name = "ibmvscsi",
1445         .queuecommand = ibmvscsi_queuecommand,
1446         .eh_abort_handler = ibmvscsi_eh_abort_handler,
1447         .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1448         .cmd_per_lun = 16,
1449         .can_queue = 1,         /* Updated after SRP_LOGIN */
1450         .this_id = -1,
1451         .sg_tablesize = SG_ALL,
1452         .use_clustering = ENABLE_CLUSTERING,
1453         .shost_attrs = ibmvscsi_attrs,
1454 };
1455
1456 /**
1457  * ibmvscsi_probe - called by the vio bus code for each adapter in the device tree
1458  */
1459 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1460 {
1461         struct ibmvscsi_host_data *hostdata;
1462         struct Scsi_Host *host;
1463         struct device *dev = &vdev->dev;
1464         unsigned long wait_switch = 0;
1465
1466         vdev->dev.driver_data = NULL;
1467
1468         host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1469         if (!host) {
1470                 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
1471                 goto scsi_host_alloc_failed;
1472         }
1473
1474         hostdata = (struct ibmvscsi_host_data *)host->hostdata;
1475         memset(hostdata, 0x00, sizeof(*hostdata));
1476         INIT_LIST_HEAD(&hostdata->sent);
1477         hostdata->host = host;
1478         hostdata->dev = dev;
1479         atomic_set(&hostdata->request_limit, -1);
1480         hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
1481
1482         if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
1483                                     max_requests) != 0) {
1484                 printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
1485                 goto init_crq_failed;
1486         }
1487         if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
1488                 printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
1489                 goto init_pool_failed;
1490         }
1491
1492         host->max_lun = 8;
1493         host->max_id = max_id;
1494         host->max_channel = max_channel;
1495
1496         if (scsi_add_host(hostdata->host, hostdata->dev))
1497                 goto add_host_failed;
1498
1499         /* Try to send an initialization message.  Note that this is allowed
1500          * to fail if the other end is not active.  In that case we don't
1501          * want to scan the host.
1502          */
1503         if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
1504                 /*
1505                  * Wait around max init_timeout secs for the adapter to finish
1506                  * initializing. When we are done initializing, we will have a
1507                  * valid request_limit.  We don't want Linux scanning before
1508                  * we are ready.
1509                  */
1510                 for (wait_switch = jiffies + (init_timeout * HZ);
1511                      time_before(jiffies, wait_switch) &&
1512                      atomic_read(&hostdata->request_limit) < 2;) {
1513
1514                         msleep(10);
1515                 }
1516
1517                 /* if we now have a valid request_limit, initiate a scan */
1518                 if (atomic_read(&hostdata->request_limit) > 0)
1519                         scsi_scan_host(host);
1520         }
1521
1522         vdev->dev.driver_data = hostdata;
1523         return 0;
1524
1525       add_host_failed:
1526         release_event_pool(&hostdata->pool, hostdata);
1527       init_pool_failed:
1528         ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
1529       init_crq_failed:
1530         scsi_host_put(host);
1531       scsi_host_alloc_failed:
1532         return -1;
1533 }
1534
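/**
 * ibmvscsi_remove - undo ibmvscsi_probe: release the event pool and CRQ,
 * then unregister and drop the Scsi_Host
 */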
1535 static int ibmvscsi_remove(struct vio_dev *vdev)
1536 {
1537         struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1538         release_event_pool(&hostdata->pool, hostdata);
1539         ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
1540                                    max_requests);
1541
1542         scsi_remove_host(hostdata->host);
1543         scsi_host_put(hostdata->host);
1544
1545         return 0;
1546 }
1547
1548 /**
1549  * ibmvscsi_device_table: Used by vio.c to match devices in the device
1550  * tree that we support.
1551  */
1552 static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1553         {"vscsi", "IBM,v-scsi"},
1554         { "", "" }
1555 };
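/* Export the id table so userspace module loading can match "vscsi" vio devices */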
1556 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
1557
1558 static struct vio_driver ibmvscsi_driver = {
1559         .id_table = ibmvscsi_device_table,
1560         .probe = ibmvscsi_probe,
1561         .remove = ibmvscsi_remove,
1562         .driver = {
1563                 .name = "ibmvscsi",
1564                 .owner = THIS_MODULE,
1565         }
1566 };
1567
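/*
 * Module entry points: registering with the vio bus causes ibmvscsi_probe()
 * to run for every matching virtual adapter in the device tree.
 */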
1568 int __init ibmvscsi_module_init(void)
1569 {
1570         return vio_register_driver(&ibmvscsi_driver);
1571 }
1572
1573 void __exit ibmvscsi_module_exit(void)
1574 {
1575         vio_unregister_driver(&ibmvscsi_driver);
1576 }
1577
1578 module_init(ibmvscsi_module_init);
1579 module_exit(ibmvscsi_module_exit);