drivers/scsi/mvumi.c

/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

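/*
 * Free-tag allocator: a simple LIFO stack of unsigned short tags.
 * tag_init() marks every tag free (the stack holds size-1 .. 0),
 * tag_get_one() pops a free tag and tag_release_one() pushes one back.
 * Callers are expected to serialize access themselves; in this driver
 * that serialization comes from the SCSI host lock.
 */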
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
        unsigned short i;
        BUG_ON(size != st->size);
        st->top = size;
        for (i = 0; i < size; i++)
                st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
        BUG_ON(st->top <= 0);
        return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
                                                        unsigned short tag)
{
        BUG_ON(st->top >= st->size);
        st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
        return st->top == 0;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
        int i;

        for (i = 0; i < MAX_BASE_ADDRESS; i++)
                if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
                                                                addr_array[i])
                        pci_iounmap(dev, addr_array[i]);
}

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
        int i;

        for (i = 0; i < MAX_BASE_ADDRESS; i++) {
                if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
                        addr_array[i] = pci_iomap(dev, i, 0);
                        if (!addr_array[i]) {
                                dev_err(&dev->dev, "failed to map Bar[%d]\n",
                                                                        i);
                                mvumi_unmap_pci_addr(dev, addr_array);
                                return -ENOMEM;
                        }
                } else
                        addr_array[i] = NULL;

                dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
        }

        return 0;
}

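/*
 * Every allocation made here is linked onto mhba->res_list, so
 * mvumi_release_mem_resource() can later free all of them in one pass
 * during teardown.
 */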
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
                                enum resource_type type, unsigned int size)
{
        struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res) {
                dev_err(&mhba->pdev->dev,
                        "Failed to allocate memory for resource manager.\n");
                return NULL;
        }

        switch (type) {
        case RESOURCE_CACHED_MEMORY:
                res->virt_addr = kzalloc(size, GFP_KERNEL);
                if (!res->virt_addr) {
                        dev_err(&mhba->pdev->dev,
                                "unable to allocate memory, size = %d.\n",
                                                                        size);
                        kfree(res);
                        return NULL;
                }
                break;

        case RESOURCE_UNCACHED_MEMORY:
                size = round_up(size, 8);
                res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
                                                        &res->bus_addr);
                if (!res->virt_addr) {
                        dev_err(&mhba->pdev->dev,
                                "unable to allocate consistent mem, size = %d.\n",
                                                                        size);
                        kfree(res);
                        return NULL;
                }
                memset(res->virt_addr, 0, size);
                break;

        default:
                dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
                kfree(res);
                return NULL;
        }

        res->type = type;
        res->size = size;
        INIT_LIST_HEAD(&res->entry);
        list_add_tail(&res->entry, &mhba->res_list);

        return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
        struct mvumi_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
                switch (res->type) {
                case RESOURCE_UNCACHED_MEMORY:
                        pci_free_consistent(mhba->pdev, res->size,
                                                res->virt_addr, res->bus_addr);
                        break;
                case RESOURCE_CACHED_MEMORY:
                        kfree(res->virt_addr);
                        break;
                default:
                        dev_err(&mhba->pdev->dev,
                                "unknown resource type %d\n", res->type);
                        break;
                }
                list_del(&res->entry);
                kfree(res);
        }
        mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl -     Prepares SGL
 * @mhba:               Adapter soft state
 * @scmd:               SCSI command from the mid-layer
 * @sgl_p:              SGL to be filled in
 * @sg_count:           return the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
                                        void *sgl_p, unsigned char *sg_count)
{
        struct scatterlist *sg;
        struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
        unsigned int i;
        unsigned int sgnum = scsi_sg_count(scmd);
        dma_addr_t busaddr;

        if (sgnum) {
                sg = scsi_sglist(scmd);
                *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
                                (int) scmd->sc_data_direction);
                if (*sg_count > mhba->max_sge) {
                        dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
                                                "than max sg[0x%x].\n",
                                                *sg_count, mhba->max_sge);
                        return -1;
                }
                for (i = 0; i < *sg_count; i++) {
                        busaddr = sg_dma_address(&sg[i]);
                        m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
                        m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
                        m_sg->flags = 0;
                        m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
                        if ((i + 1) == *sg_count)
                                m_sg->flags |= SGD_EOT;

                        m_sg++;
                }
        } else {
                scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
                        pci_map_single(mhba->pdev, scsi_sglist(scmd),
                                scsi_bufflen(scmd),
                                (int) scmd->sc_data_direction)
                        : 0;
                busaddr = scmd->SCp.dma_handle;
                m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
                m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
                m_sg->flags = SGD_EOT;
                m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
                *sg_count = 1;
        }

        return 0;
}

static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
                                                        unsigned int size)
{
        struct mvumi_sgl *m_sg;
        void *virt_addr;
        dma_addr_t phy_addr;

        if (size == 0)
                return 0;

        virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
        if (!virt_addr)
                return -1;

        memset(virt_addr, 0, size);

        m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
        cmd->frame->sg_counts = 1;
        cmd->data_buf = virt_addr;

        m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
        m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
        m_sg->flags = SGD_EOT;
        m_sg->size = cpu_to_le32(size);

        return 0;
}

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
                                unsigned int buf_size)
{
        struct mvumi_cmd *cmd;

        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
                return NULL;
        }
        INIT_LIST_HEAD(&cmd->queue_pointer);

        cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
        if (!cmd->frame) {
                dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
                        " frame, size = %d.\n", mhba->ib_max_size);
                kfree(cmd);
                return NULL;
        }

        if (buf_size) {
                if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
                        dev_err(&mhba->pdev->dev, "failed to allocate memory"
                                                " for internal frame\n");
                        kfree(cmd->frame);
                        kfree(cmd);
                        return NULL;
                }
        } else
                cmd->frame->sg_counts = 0;

        return cmd;
}

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
                                                struct mvumi_cmd *cmd)
{
        struct mvumi_sgl *m_sg;
        unsigned int size;
        dma_addr_t phy_addr;

        if (cmd && cmd->frame) {
                if (cmd->frame->sg_counts) {
                        m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
                        size = m_sg->size;

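                        /*
                         * Rebuild the DMA address from the two 32-bit
                         * halves; the high half is shifted in two 16-bit
                         * steps so the expression stays well defined when
                         * dma_addr_t is only 32 bits wide.
                         */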
                        phy_addr = (dma_addr_t) m_sg->baseaddr_l |
                                (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

                        pci_free_consistent(mhba->pdev, size, cmd->data_buf,
                                                                phy_addr);
                }
                kfree(cmd->frame);
                kfree(cmd);
        }
}

/**
 * mvumi_get_cmd -      Get a command from the free pool
 * @mhba:               Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
        struct mvumi_cmd *cmd = NULL;

        if (likely(!list_empty(&mhba->cmd_pool))) {
                cmd = list_entry((&mhba->cmd_pool)->next,
                                struct mvumi_cmd, queue_pointer);
                list_del_init(&cmd->queue_pointer);
        } else
                dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

        return cmd;
}

/**
 * mvumi_return_cmd -   Return a cmd to free command pool
 * @mhba:               Adapter soft state
 * @cmd:                Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
                                                struct mvumi_cmd *cmd)
{
        cmd->scmd = NULL;
        list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds -    Free all the cmds in the free cmd pool
 * @mhba:               Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
        struct mvumi_cmd *cmd;

        while (!list_empty(&mhba->cmd_pool)) {
                cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
                                                        queue_pointer);
                list_del(&cmd->queue_pointer);
                kfree(cmd->frame);
                kfree(cmd);
        }
}

/**
 * mvumi_alloc_cmds -   Allocates the command packets
 * @mhba:               Adapter soft state
 *
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
        int i;
        struct mvumi_cmd *cmd;

        for (i = 0; i < mhba->max_io; i++) {
                cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
                if (!cmd)
                        goto err_exit;

                INIT_LIST_HEAD(&cmd->queue_pointer);
                list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
                cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
                if (!cmd->frame)
                        goto err_exit;
        }
        return 0;

err_exit:
        dev_err(&mhba->pdev->dev,
                        "failed to allocate memory for cmd[0x%x].\n", i);
        while (!list_empty(&mhba->cmd_pool)) {
                cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
                                                queue_pointer);
                list_del(&cmd->queue_pointer);
                kfree(cmd->frame);
                kfree(cmd);
        }
        return -ENOMEM;
}

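/*
 * The inbound (request) list is a circular buffer.  The hardware read
 * pointer and the driver's ib_cur_slot each carry a slot number plus
 * CL_POINTER_TOGGLE, which flips on every wrap-around; equal slot
 * numbers with different toggle bits therefore mean the list is full.
 */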
static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
        unsigned int ib_rp_reg, cur_ib_entry;

        if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
                dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
                return -1;
        }
        ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);

        if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
                        (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
                        ((ib_rp_reg & CL_POINTER_TOGGLE) !=
                        (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
                dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
                return -1;
        }

        cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
        cur_ib_entry++;
        if (cur_ib_entry >= mhba->list_num_io) {
                cur_ib_entry -= mhba->list_num_io;
                mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
        }
        mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
        mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
        *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
        atomic_inc(&mhba->fw_outstanding);

        return 0;
}

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
        iowrite32(0xfff, mhba->ib_shadow);
        iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
}

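/*
 * Re-read a just-produced outbound entry and sanity-check its tag and
 * request id against the command we think it completes; the copy
 * pointer can briefly point at an entry the firmware has not finished
 * writing yet.
 */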
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
                unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
        unsigned short tag, request_id;

        udelay(1);
        p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
        request_id = p_outb_frame->request_id;
        tag = p_outb_frame->tag;
        if (tag > mhba->tag_pool.size) {
                dev_err(&mhba->pdev->dev, "ob frame data error\n");
                return -1;
        }
        if (mhba->tag_cmd[tag] == NULL) {
                dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
                return -1;
        } else if (mhba->tag_cmd[tag]->request_id != request_id &&
                                                mhba->request_id_enabled) {
                        dev_err(&mhba->pdev->dev, "request ID from FW: 0x%x, "
                                        "cmd request ID: 0x%x\n", request_id,
                                        mhba->tag_cmd[tag]->request_id);
                        return -1;
        }

        return 0;
}

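/*
 * Drain completed entries from the outbound (response) list.  The
 * shadow location is polled until it agrees with the copy-pointer
 * register, then each new entry is copied into a pre-allocated
 * mvumi_ob_data buffer and queued on free_ob_list for completion in
 * mvumi_handle_clob().
 */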
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
        unsigned int ob_write_reg, ob_write_shadow_reg;
        unsigned int cur_obf, assign_obf_end, i;
        struct mvumi_ob_data *ob_data;
        struct mvumi_rsp_frame *p_outb_frame;

        do {
                ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
                ob_write_shadow_reg = ioread32(mhba->ob_shadow);
        } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);

        cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
        assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;

        if ((ob_write_reg & CL_POINTER_TOGGLE) !=
                                (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
                assign_obf_end += mhba->list_num_io;
        }

        for (i = (assign_obf_end - cur_obf); i != 0; i--) {
                cur_obf++;
                if (cur_obf >= mhba->list_num_io) {
                        cur_obf -= mhba->list_num_io;
                        mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
                }

                p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

                /* Copy pointer may point to entry in outbound list
                 * before entry has valid data
                 */
                if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
                        mhba->tag_cmd[p_outb_frame->tag] == NULL ||
                        p_outb_frame->request_id !=
                                mhba->tag_cmd[p_outb_frame->tag]->request_id))
                        if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
                                continue;

                if (!list_empty(&mhba->ob_data_list)) {
                        ob_data = (struct mvumi_ob_data *)
                                list_first_entry(&mhba->ob_data_list,
                                        struct mvumi_ob_data, list);
                        list_del_init(&ob_data->list);
                } else {
                        ob_data = NULL;
                        if (cur_obf == 0) {
                                cur_obf = mhba->list_num_io - 1;
                                mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
                        } else
                                cur_obf -= 1;
                        break;
                }

                memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
                p_outb_frame->tag = 0xff;

                list_add_tail(&ob_data->list, &mhba->free_ob_list);
        }
        mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
        mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
        iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
}

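/*
 * Ask the firmware to soft-reset: endpoint interrupts are masked
 * first, and the soft-reset doorbell is rung only when the firmware
 * reports the handshake-done state.
 */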
static void mvumi_reset(void *regs)
{
        iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
        if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
                return;

        iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
        mhba->fw_state = FW_STATE_ABORT;
        mvumi_reset(mhba->mmio);

        if (mvumi_start(mhba))
                return FAILED;
        else
                return SUCCESS;
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
        struct mvumi_hba *mhba;

        mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

        scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
                        scmd->serial_number, scmd->cmnd[0], scmd->retries);

        return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
                                                struct mvumi_cmd *cmd)
{
        unsigned long flags;

        cmd->cmd_status = REQ_STATUS_PENDING;

        if (atomic_read(&cmd->sync_cmd)) {
                dev_err(&mhba->pdev->dev,
                        "last blocked cmd not finished, sync_cmd = %d\n",
                                                atomic_read(&cmd->sync_cmd));
                BUG_ON(1);
                return -1;
        }
        atomic_inc(&cmd->sync_cmd);
        spin_lock_irqsave(mhba->shost->host_lock, flags);
        mhba->instancet->fire_cmd(mhba, cmd);
        spin_unlock_irqrestore(mhba->shost->host_lock, flags);

        wait_event_timeout(mhba->int_cmd_wait_q,
                (cmd->cmd_status != REQ_STATUS_PENDING),
                MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

        /* command timeout */
        if (atomic_read(&cmd->sync_cmd)) {
                spin_lock_irqsave(mhba->shost->host_lock, flags);
                atomic_dec(&cmd->sync_cmd);
                if (mhba->tag_cmd[cmd->frame->tag]) {
                        mhba->tag_cmd[cmd->frame->tag] = NULL;
                        dev_warn(&mhba->pdev->dev, "TIMEOUT: release tag [%d]\n",
                                                        cmd->frame->tag);
                        tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
                }
                if (!list_empty(&cmd->queue_pointer)) {
                        dev_warn(&mhba->pdev->dev,
                                "TIMEOUT: an internal command was never sent!\n");
                        list_del_init(&cmd->queue_pointer);
                } else
                        atomic_dec(&mhba->fw_outstanding);

                spin_unlock_irqrestore(mhba->shost->host_lock, flags);
        }
        return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
        mvumi_free_cmds(mhba);
        mvumi_release_mem_resource(mhba);
        mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
        kfree(mhba->handshake_page);
        pci_release_regions(mhba->pdev);
}

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
        struct mvumi_cmd *cmd;
        struct mvumi_msg_frame *frame;
        unsigned char device_id, retry = 0;
        unsigned char bitcount = sizeof(unsigned char) * 8;

        for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
                if (!(mhba->target_map[device_id / bitcount] &
                                (1 << (device_id % bitcount))))
                        continue;
get_cmd:        cmd = mvumi_create_internal_cmd(mhba, 0);
                if (!cmd) {
                        if (retry++ >= 5) {
                                dev_err(&mhba->pdev->dev, "failed to get memory"
                                        " for internal flush cache cmd for "
                                        "device %d\n", device_id);
                                retry = 0;
                                continue;
                        } else
                                goto get_cmd;
                }
                cmd->scmd = NULL;
                cmd->cmd_status = REQ_STATUS_PENDING;
                atomic_set(&cmd->sync_cmd, 0);
                frame = cmd->frame;
                frame->req_function = CL_FUN_SCSI_CMD;
                frame->device_id = device_id;
                frame->cmd_flag = CMD_FLAG_NON_DATA;
                frame->data_transfer_length = 0;
                frame->cdb_length = MAX_COMMAND_SIZE;
                memset(frame->cdb, 0, MAX_COMMAND_SIZE);
                frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
                frame->cdb[2] = CDB_CORE_SHUTDOWN;

                mvumi_issue_blocked_cmd(mhba, cmd);
                if (cmd->cmd_status != SAM_STAT_GOOD) {
                        dev_err(&mhba->pdev->dev,
                                "device %d flush cache failed, status=0x%x.\n",
                                device_id, cmd->cmd_status);
                }

                mvumi_delete_internal_cmd(mhba, cmd);
        }
        return 0;
}

static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
                                                        unsigned short len)
{
        unsigned char *ptr;
        unsigned char ret = 0;
        unsigned short i;

        ptr = (unsigned char *) p_header->frame_content;
        for (i = 0; i < len; i++) {
                ret ^= *ptr;
                ptr++;
        }

        return ret;
}

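/*
 * Build one handshake page in place.  Each page starts with a common
 * mvumi_hs_header; frame_length covers the page body past the first
 * four header bytes, and the checksum is an XOR over the frame content.
 */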
void mvumi_hs_build_page(struct mvumi_hba *mhba,
                                struct mvumi_hs_header *hs_header)
{
        struct mvumi_hs_page2 *hs_page2;
        struct mvumi_hs_page4 *hs_page4;
        struct mvumi_hs_page3 *hs_page3;
        struct timeval time;
        unsigned int local_time;

        switch (hs_header->page_code) {
        case HS_PAGE_HOST_INFO:
                hs_page2 = (struct mvumi_hs_page2 *) hs_header;
                hs_header->frame_length = sizeof(*hs_page2) - 4;
                memset(hs_header->frame_content, 0, hs_header->frame_length);
                hs_page2->host_type = 3; /* 3 means Linux */
                hs_page2->host_ver.ver_major = VER_MAJOR;
                hs_page2->host_ver.ver_minor = VER_MINOR;
                hs_page2->host_ver.ver_oem = VER_OEM;
                hs_page2->host_ver.ver_build = VER_BUILD;
                hs_page2->system_io_bus = 0;
                hs_page2->slot_number = 0;
                hs_page2->intr_level = 0;
                hs_page2->intr_vector = 0;
                do_gettimeofday(&time);
                local_time = (unsigned int) (time.tv_sec -
                                                (sys_tz.tz_minuteswest * 60));
                hs_page2->seconds_since1970 = local_time;
                hs_header->checksum = mvumi_calculate_checksum(hs_header,
                                                hs_header->frame_length);
                break;

        case HS_PAGE_FIRM_CTL:
                hs_page3 = (struct mvumi_hs_page3 *) hs_header;
                hs_header->frame_length = sizeof(*hs_page3) - 4;
                memset(hs_header->frame_content, 0, hs_header->frame_length);
                hs_header->checksum = mvumi_calculate_checksum(hs_header,
                                                hs_header->frame_length);
                break;

        case HS_PAGE_CL_INFO:
                hs_page4 = (struct mvumi_hs_page4 *) hs_header;
                hs_header->frame_length = sizeof(*hs_page4) - 4;
                memset(hs_header->frame_content, 0, hs_header->frame_length);
                hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
                hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

                hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
                hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
                hs_page4->ib_entry_size = mhba->ib_max_size_setting;
                hs_page4->ob_entry_size = mhba->ob_max_size_setting;
                hs_page4->ob_depth = mhba->list_num_io;
                hs_page4->ib_depth = mhba->list_num_io;
                hs_header->checksum = mvumi_calculate_checksum(hs_header,
                                                hs_header->frame_length);
                break;

        default:
                dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
                        hs_header->page_code);
                break;
        }
}

/**
 * mvumi_init_data -    Initialize requested data for FW
 * @mhba:                       Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
        struct mvumi_ob_data *ob_pool;
        struct mvumi_res *res_mgnt;
        unsigned int tmp_size, offset, i;
        void *virmem, *v;
        dma_addr_t p;

        if (mhba->fw_flag & MVUMI_FW_ALLOC)
                return 0;

        tmp_size = mhba->ib_max_size * mhba->max_io;
        tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
        tmp_size += 8 + sizeof(u32) + 16;

        res_mgnt = mvumi_alloc_mem_resource(mhba,
                                        RESOURCE_UNCACHED_MEMORY, tmp_size);
        if (!res_mgnt) {
                dev_err(&mhba->pdev->dev,
                        "failed to allocate memory for inbound list\n");
                goto fail_alloc_dma_buf;
        }

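        /*
         * Carve the inbound list, the ib/ob shadow words and the
         * outbound list out of this single uncached block, aligning
         * each piece as it needs (128-byte lists, 8-byte shadows);
         * the constants added to tmp_size above appear to pad for
         * these round_up() adjustments.
         */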
        p = res_mgnt->bus_addr;
        v = res_mgnt->virt_addr;
        /* ib_list */
        offset = round_up(p, 128) - p;
        p += offset;
        v += offset;
        mhba->ib_list = v;
        mhba->ib_list_phys = p;
        v += mhba->ib_max_size * mhba->max_io;
        p += mhba->ib_max_size * mhba->max_io;
        /* ib shadow */
        offset = round_up(p, 8) - p;
        p += offset;
        v += offset;
        mhba->ib_shadow = v;
        mhba->ib_shadow_phys = p;
        p += sizeof(u32);
        v += sizeof(u32);
        /* ob shadow */
        offset = round_up(p, 8) - p;
        p += offset;
        v += offset;
        mhba->ob_shadow = v;
        mhba->ob_shadow_phys = p;
        p += 8;
        v += 8;

        /* ob list */
        offset = round_up(p, 128) - p;
        p += offset;
        v += offset;

        mhba->ob_list = v;
        mhba->ob_list_phys = p;

        /* ob data pool */
        tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
        tmp_size = round_up(tmp_size, 8);

        res_mgnt = mvumi_alloc_mem_resource(mhba,
                                RESOURCE_CACHED_MEMORY, tmp_size);
        if (!res_mgnt) {
                dev_err(&mhba->pdev->dev,
                        "failed to allocate memory for outbound data buffer\n");
                goto fail_alloc_dma_buf;
        }
        virmem = res_mgnt->virt_addr;

        for (i = mhba->max_io; i != 0; i--) {
                ob_pool = (struct mvumi_ob_data *) virmem;
                list_add_tail(&ob_pool->list, &mhba->ob_data_list);
                virmem += mhba->ob_max_size + sizeof(*ob_pool);
        }

        tmp_size = sizeof(unsigned short) * mhba->max_io +
                                sizeof(struct mvumi_cmd *) * mhba->max_io;
        tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
                                                (sizeof(unsigned char) * 8);

        res_mgnt = mvumi_alloc_mem_resource(mhba,
                                RESOURCE_CACHED_MEMORY, tmp_size);
        if (!res_mgnt) {
                dev_err(&mhba->pdev->dev,
                        "failed to allocate memory for tag and target map\n");
                goto fail_alloc_dma_buf;
        }

        virmem = res_mgnt->virt_addr;
        mhba->tag_pool.stack = virmem;
        mhba->tag_pool.size = mhba->max_io;
        tag_init(&mhba->tag_pool, mhba->max_io);
        virmem += sizeof(unsigned short) * mhba->max_io;

        mhba->tag_cmd = virmem;
        virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

        mhba->target_map = virmem;

        mhba->fw_flag |= MVUMI_FW_ALLOC;
        return 0;

fail_alloc_dma_buf:
        mvumi_release_mem_resource(mhba);
        return -1;
}

static int mvumi_hs_process_page(struct mvumi_hba *mhba,
                                struct mvumi_hs_header *hs_header)
{
        struct mvumi_hs_page1 *hs_page1;
        unsigned char page_checksum;

        page_checksum = mvumi_calculate_checksum(hs_header,
                                                hs_header->frame_length);
        if (page_checksum != hs_header->checksum) {
                dev_err(&mhba->pdev->dev, "checksum error\n");
                return -1;
        }

        switch (hs_header->page_code) {
        case HS_PAGE_FIRM_CAP:
                hs_page1 = (struct mvumi_hs_page1 *) hs_header;

                mhba->max_io = hs_page1->max_io_support;
                mhba->list_num_io = hs_page1->cl_inout_list_depth;
                mhba->max_transfer_size = hs_page1->max_transfer_size;
                mhba->max_target_id = hs_page1->max_devices_support;
                mhba->hba_capability = hs_page1->capability;
                mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
                mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

                mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
                mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

                dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
                                                hs_page1->fw_ver.ver_build);

                break;
        default:
                dev_err(&mhba->pdev->dev, "handshake: page code error\n");
                return -1;
        }
        return 0;
}

/**
 * mvumi_handshake -    Move the FW to READY state
 * @mhba:                               Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake
 * state, the driver must take steps to bring it to the ready state.
 * Otherwise, it has to wait for the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
        unsigned int hs_state, tmp, hs_fun;
        struct mvumi_hs_header *hs_header;
        void *regs = mhba->mmio;

        if (mhba->fw_state == FW_STATE_STARTING)
                hs_state = HS_S_START;
        else {
                tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
                hs_state = HS_GET_STATE(tmp);
                dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
                if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
                        mhba->fw_state = FW_STATE_STARTING;
                        return -1;
                }
        }

        hs_fun = 0;
        switch (hs_state) {
        case HS_S_START:
                mhba->fw_state = FW_STATE_HANDSHAKING;
                HS_SET_STATUS(hs_fun, HS_STATUS_OK);
                HS_SET_STATE(hs_fun, HS_S_RESET);
                iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
                iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
                iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
                break;

        case HS_S_RESET:
                iowrite32(lower_32_bits(mhba->handshake_page_phys),
                                        regs + CPU_PCIEA_TO_ARM_MSG1);
                iowrite32(upper_32_bits(mhba->handshake_page_phys),
                                        regs + CPU_ARM_TO_PCIEA_MSG1);
                HS_SET_STATUS(hs_fun, HS_STATUS_OK);
                HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
                iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
                iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);

                break;

        case HS_S_PAGE_ADDR:
        case HS_S_QUERY_PAGE:
        case HS_S_SEND_PAGE:
                hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
                if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
                        mhba->hba_total_pages =
                        ((struct mvumi_hs_page1 *) hs_header)->total_pages;

                        if (mhba->hba_total_pages == 0)
                                mhba->hba_total_pages = HS_PAGE_TOTAL-1;
                }

                if (hs_state == HS_S_QUERY_PAGE) {
                        if (mvumi_hs_process_page(mhba, hs_header)) {
                                HS_SET_STATE(hs_fun, HS_S_ABORT);
                                return -1;
                        }
                        if (mvumi_init_data(mhba)) {
                                HS_SET_STATE(hs_fun, HS_S_ABORT);
                                return -1;
                        }
                } else if (hs_state == HS_S_PAGE_ADDR) {
                        hs_header->page_code = 0;
                        mhba->hba_total_pages = HS_PAGE_TOTAL-1;
                }

                if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
                        hs_header->page_code++;
                        if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
                                mvumi_hs_build_page(mhba, hs_header);
                                HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
                        } else
                                HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
                } else
                        HS_SET_STATE(hs_fun, HS_S_END);

                HS_SET_STATUS(hs_fun, HS_STATUS_OK);
                iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
                iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
                break;

        case HS_S_END:
                /* Set communication list ISR */
                tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
                tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
                iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
                iowrite32(mhba->list_num_io, mhba->ib_shadow);
                /* Set InBound List Available count shadow */
                iowrite32(lower_32_bits(mhba->ib_shadow_phys),
                                        regs + CLA_INB_AVAL_COUNT_BASEL);
                iowrite32(upper_32_bits(mhba->ib_shadow_phys),
                                        regs + CLA_INB_AVAL_COUNT_BASEH);

                /* Set OutBound List Available count shadow */
                iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
                                                mhba->ob_shadow);
                iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
                iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);

                mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
                mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
                mhba->fw_state = FW_STATE_STARTED;

                break;
        default:
                dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
                                                                hs_state);
                return -1;
        }
        return 0;
}

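/*
 * Run one handshake step and then poll the doorbell status until the
 * firmware answers, the started state is reached, or FW_MAX_DELAY
 * seconds pass.
 */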
static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
        unsigned int isr_status;
        unsigned long before;

        before = jiffies;
        mvumi_handshake(mhba);
        do {
                isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);

                if (mhba->fw_state == FW_STATE_STARTED)
                        return 0;
                if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
                        dev_err(&mhba->pdev->dev,
                                "no handshake response at state 0x%x.\n",
                                  mhba->fw_state);
                        dev_err(&mhba->pdev->dev,
                                "isr : global=0x%x,status=0x%x.\n",
                                        mhba->global_isr, isr_status);
                        return -1;
                }
                rmb();
                usleep_range(1000, 2000);
        } while (!(isr_status & DRBL_HANDSHAKE_ISR));

        return 0;
}

static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
        void *regs = mhba->mmio;
        unsigned int tmp;
        unsigned long before;

        before = jiffies;
        tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
        while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
                if (tmp != HANDSHAKE_READYSTATE)
                        iowrite32(DRBL_MU_RESET,
                                        regs + CPU_PCIEA_TO_ARM_DRBL_REG);
                if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
                        dev_err(&mhba->pdev->dev,
                                "invalid signature [0x%x].\n", tmp);
                        return -1;
                }
                usleep_range(1000, 2000);
                rmb();
                tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
        }

        mhba->fw_state = FW_STATE_STARTING;
        dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
        do {
                if (mvumi_handshake_event(mhba)) {
                        dev_err(&mhba->pdev->dev,
                                        "handshake failed at state 0x%x.\n",
                                                mhba->fw_state);
                        return -1;
                }
        } while (mhba->fw_state != FW_STATE_STARTED);

        dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

        return 0;
}

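/*
 * Bring the controller to the started state: clear any stale doorbell
 * bits, unmask the doorbell interrupt and run the full handshake.
 */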
static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
        void *regs = mhba->mmio;
        unsigned int tmp;
        /* clear Door bell */
        tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
        iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);

        iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
        tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
        iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
        if (mvumi_check_handshake(mhba))
                return -1;

        return 0;
}

/**
 * mvumi_complete_cmd - Completes a command
 * @mhba:                       Adapter soft state
 * @cmd:                        Command to be completed
 * @ob_frame:                   Response frame from the FW
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
                                        struct mvumi_rsp_frame *ob_frame)
{
        struct scsi_cmnd *scmd = cmd->scmd;

        cmd->scmd->SCp.ptr = NULL;
        scmd->result = ob_frame->req_status;

        switch (ob_frame->req_status) {
        case SAM_STAT_GOOD:
                scmd->result |= DID_OK << 16;
                break;
        case SAM_STAT_BUSY:
                scmd->result |= DID_BUS_BUSY << 16;
                break;
        case SAM_STAT_CHECK_CONDITION:
                scmd->result |= (DID_OK << 16);
                if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
                        memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
                                sizeof(struct mvumi_sense_data));
                        scmd->result |= (DRIVER_SENSE << 24);
                }
                break;
        default:
                scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
                break;
        }

        if (scsi_bufflen(scmd)) {
                if (scsi_sg_count(scmd)) {
                        pci_unmap_sg(mhba->pdev,
                                scsi_sglist(scmd),
                                scsi_sg_count(scmd),
                                (int) scmd->sc_data_direction);
                } else {
                        pci_unmap_single(mhba->pdev,
                                scmd->SCp.dma_handle,
                                scsi_bufflen(scmd),
                                (int) scmd->sc_data_direction);

                        scmd->SCp.dma_handle = 0;
                }
        }
        cmd->scmd->scsi_done(scmd);
        mvumi_return_cmd(mhba, cmd);
}

static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
                                                struct mvumi_cmd *cmd,
                                        struct mvumi_rsp_frame *ob_frame)
{
        if (atomic_read(&cmd->sync_cmd)) {
                cmd->cmd_status = ob_frame->req_status;

                if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
                                (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
                                cmd->data_buf) {
                        memcpy(cmd->data_buf, ob_frame->payload,
                                        sizeof(struct mvumi_sense_data));
                }
                atomic_dec(&cmd->sync_cmd);
                wake_up(&mhba->int_cmd_wait_q);
        }
}

static void mvumi_show_event(struct mvumi_hba *mhba,
                        struct mvumi_driver_event *ptr)
{
        unsigned int i;

        dev_warn(&mhba->pdev->dev,
                "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
                ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
        if (ptr->param_count) {
                printk(KERN_WARNING "Event param(len 0x%x): ",
                                                ptr->param_count);
                for (i = 0; i < ptr->param_count; i++)
                        printk(KERN_WARNING "0x%x ", ptr->params[i]);

                printk(KERN_WARNING "\n");
        }

        if (ptr->sense_data_length) {
                printk(KERN_WARNING "Event sense data(len 0x%x): ",
                                                ptr->sense_data_length);
                for (i = 0; i < ptr->sense_data_length; i++)
                        printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
                printk(KERN_WARNING "\n");
        }
}

static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
{
        if (msg == APICDB1_EVENT_GETEVENT) {
                int i, count;
                struct mvumi_driver_event *param = NULL;
                struct mvumi_event_req *er = buffer;
                count = er->count;
                if (count > MAX_EVENTS_RETURNED) {
                        dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
                                        " than max event count[0x%x].\n",
                                        count, MAX_EVENTS_RETURNED);
                        return;
                }
                for (i = 0; i < count; i++) {
                        param = &er->events[i];
                        mvumi_show_event(mhba, param);
                }
        }
}

static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
{
        struct mvumi_cmd *cmd;
        struct mvumi_msg_frame *frame;

        cmd = mvumi_create_internal_cmd(mhba, 512);
        if (!cmd)
                return -1;
        cmd->scmd = NULL;
        cmd->cmd_status = REQ_STATUS_PENDING;
        atomic_set(&cmd->sync_cmd, 0);
        frame = cmd->frame;
        frame->device_id = 0;
        frame->cmd_flag = CMD_FLAG_DATA_IN;
        frame->req_function = CL_FUN_SCSI_CMD;
        frame->cdb_length = MAX_COMMAND_SIZE;
        frame->data_transfer_length = sizeof(struct mvumi_event_req);
        memset(frame->cdb, 0, MAX_COMMAND_SIZE);
        frame->cdb[0] = APICDB0_EVENT;
        frame->cdb[1] = msg;
        mvumi_issue_blocked_cmd(mhba, cmd);

        if (cmd->cmd_status != SAM_STAT_GOOD)
                dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
                                                        cmd->cmd_status);
        else
                mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);

        mvumi_delete_internal_cmd(mhba, cmd);
        return 0;
}

static void mvumi_scan_events(struct work_struct *work)
{
        struct mvumi_events_wq *mu_ev =
                container_of(work, struct mvumi_events_wq, work_q);

        mvumi_get_event(mu_ev->mhba, mu_ev->event);
        kfree(mu_ev);
}

static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
{
        struct mvumi_events_wq *mu_ev;

        mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
        if (mu_ev) {
                INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
                mu_ev->mhba = mhba;
                mu_ev->event = msg;
                mu_ev->param = NULL;
                schedule_work(&mu_ev->work_q);
        }
}

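/*
 * Complete everything queued on free_ob_list: return the response
 * buffer to ob_data_list, release the tag, and finish the SCSI or
 * internal command.  Finally poke fire_cmd() so queued requests can
 * reuse the freed slots.
 */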
static void mvumi_handle_clob(struct mvumi_hba *mhba)
{
        struct mvumi_rsp_frame *ob_frame;
        struct mvumi_cmd *cmd;
        struct mvumi_ob_data *pool;

        while (!list_empty(&mhba->free_ob_list)) {
                pool = list_first_entry(&mhba->free_ob_list,
                                                struct mvumi_ob_data, list);
                list_del_init(&pool->list);
                list_add_tail(&pool->list, &mhba->ob_data_list);

                ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
                cmd = mhba->tag_cmd[ob_frame->tag];

                atomic_dec(&mhba->fw_outstanding);
                mhba->tag_cmd[ob_frame->tag] = NULL;
                tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
                if (cmd->scmd)
                        mvumi_complete_cmd(mhba, cmd, ob_frame);
                else
                        mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
        }
        mhba->instancet->fire_cmd(mhba, NULL);
}

static irqreturn_t mvumi_isr_handler(int irq, void *devp)
{
        struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
        unsigned long flags;

        spin_lock_irqsave(mhba->shost->host_lock, flags);
        if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
                spin_unlock_irqrestore(mhba->shost->host_lock, flags);
                return IRQ_NONE;
        }

        if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
                if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
                        dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
                        mvumi_handshake(mhba);
                }
                if (mhba->isr_status & DRBL_EVENT_NOTIFY)
                        mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
        }

        if (mhba->global_isr & INT_MAP_COMAOUT)
                mvumi_receive_ob_list_entry(mhba);

        mhba->global_isr = 0;
        mhba->isr_status = 0;
        if (mhba->fw_state == FW_STATE_STARTED)
                mvumi_handle_clob(mhba);
        spin_unlock_irqrestore(mhba->shost->host_lock, flags);
        return IRQ_HANDLED;
}

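/*
 * Copy one request frame into the next free inbound slot.  A tag is
 * taken from the pool and recorded in tag_cmd[] so the response can be
 * matched back to the command.
 */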
static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
                                                struct mvumi_cmd *cmd)
{
        void *ib_entry;
        struct mvumi_msg_frame *ib_frame;
        unsigned int frame_len;

        ib_frame = cmd->frame;
        if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
                dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
                return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
        }
        if (tag_is_empty(&mhba->tag_pool)) {
                dev_dbg(&mhba->pdev->dev, "no free tag.\n");
                return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
        }
        if (mvumi_get_ib_list_entry(mhba, &ib_entry))
                return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;

        cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
        cmd->frame->request_id = mhba->io_seq++;
        cmd->request_id = cmd->frame->request_id;
        mhba->tag_cmd[cmd->frame->tag] = cmd;
        frame_len = sizeof(*ib_frame) - 4 +
                                ib_frame->sg_counts * sizeof(struct mvumi_sgl);
        memcpy(ib_entry, ib_frame, frame_len);
        return MV_QUEUE_COMMAND_RESULT_SENT;
}

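/*
 * Queue a command (if one is given) and push as many waiting requests
 * as the inbound list will take; the write pointer is rung only once
 * per batch, via mvumi_send_ib_list_entry().
 */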
static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
{
        unsigned short num_of_cl_sent = 0;
        enum mvumi_qc_result result;

        if (cmd)
                list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);

        while (!list_empty(&mhba->waiting_req_list)) {
                cmd = list_first_entry(&mhba->waiting_req_list,
                                         struct mvumi_cmd, queue_pointer);
                list_del_init(&cmd->queue_pointer);
                result = mvumi_send_command(mhba, cmd);
                switch (result) {
                case MV_QUEUE_COMMAND_RESULT_SENT:
                        num_of_cl_sent++;
                        break;
                case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
                        list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
                        if (num_of_cl_sent > 0)
                                mvumi_send_ib_list_entry(mhba);

                        return;
                }
        }
        if (num_of_cl_sent > 0)
                mvumi_send_ib_list_entry(mhba);
}

1403 /**
1404  * mvumi_enable_intr -  Enables interrupts
1405  * @regs:                       FW register set
1406  */
1407 static void mvumi_enable_intr(void *regs)
1408 {
1409         unsigned int mask;
1410
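	/* assumption: 0x3FFFFFFF unmasks all ARM-to-PCIEA doorbell sources. */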
	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
	mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
}

/**
 * mvumi_disable_intr - Disables interrupts
 * @regs:			FW register set
 */
static void mvumi_disable_intr(void *regs)
{
	unsigned int mask;

	iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
	mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
}

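/**
 * mvumi_clear_intr -	Read and acknowledge the interrupt causes
 * @extend:		Adapter soft state (struct mvumi_hba *)
 *
 * Returns 0 if this adapter raised the interrupt (the causes are latched
 * in mhba->global_isr/isr_status for the ISR), 1 otherwise.  An all-ones
 * cause register is treated as a dead or removed device.
 */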
static int mvumi_clear_intr(void *extend)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
	unsigned int status, isr_status = 0, tmp = 0;
	void *regs = mhba->mmio;

	status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
	if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
		return 1;
	if (unlikely(status & INT_MAP_COMAERR)) {
		tmp = ioread32(regs + CLA_ISR_CAUSE);
		if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
			iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
					regs + CLA_ISR_CAUSE);
		status ^= INT_MAP_COMAERR;
		/* inbound or outbound parity error, command will timeout */
	}
	if (status & INT_MAP_COMAOUT) {
		tmp = ioread32(regs + CLA_ISR_CAUSE);
		if (tmp & CLIC_OUT_IRQ)
			iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
	}
	if (status & INT_MAP_DL_CPU2PCIEA) {
		isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
		if (isr_status)
			iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	}

	mhba->global_isr = status;
	mhba->isr_status = isr_status;

	return 0;
}

/**
 * mvumi_read_fw_status_reg - returns the current FW status value
 * @regs:			FW register set
 */
static unsigned int mvumi_read_fw_status_reg(void *regs)
{
	unsigned int status;

	status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	if (status)
		iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	return status;
}

static struct mvumi_instance_template mvumi_instance_template = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
};

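/**
 * mvumi_slave_configure -	Marks a discovered target in the target map
 * @sdev:			SCSI device being configured
 *
 * target_map is a bitmap with one bit per target id.
 */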
static int mvumi_slave_configure(struct scsi_device *sdev)
{
	struct mvumi_hba *mhba;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	mhba = (struct mvumi_hba *) sdev->host->hostdata;
	if (sdev->id >= mhba->max_target_id)
		return -EINVAL;

	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
	return 0;
}

/**
 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Driver command frame to be prepared
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int mvumi_build_frame(struct mvumi_hba *mhba,
				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *pframe;

	cmd->scmd = scmd;
	cmd->cmd_status = REQ_STATUS_PENDING;
	pframe = cmd->frame;
	pframe->device_id = ((unsigned short) scmd->device->id) |
				(((unsigned short) scmd->device->lun) << 8);
	pframe->cmd_flag = 0;

	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
		break;
	case DMA_FROM_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
		break;
	case DMA_TO_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
		break;
	case DMA_BIDIRECTIONAL:
	default:
		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
		goto error;
	}

	pframe->cdb_length = scmd->cmd_len;
	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
	pframe->req_function = CL_FUN_SCSI_CMD;
	if (scsi_bufflen(scmd)) {
		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
			&pframe->sg_counts))
			goto error;

		pframe->data_transfer_length = scsi_bufflen(scmd);
	} else {
		pframe->sg_counts = 0;
		pframe->data_transfer_length = 0;
	}
	return 0;

error:
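	/*
	 * Report CHECK CONDITION: ILLEGAL REQUEST with ASC 0x24
	 * (INVALID FIELD IN CDB).
	 */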
	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
		SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
									0);
	return -1;
}

/**
 * mvumi_queue_command -	Queue entry point
 * @shost:			Scsi_Host the command is queued to
 * @scmd:			SCSI command to be queued
 */
static int mvumi_queue_command(struct Scsi_Host *shost,
					struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd;
	struct mvumi_hba *mhba;
	unsigned long irq_flags;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	scsi_cmd_get_serial(shost, scmd);

	mhba = (struct mvumi_hba *) shost->hostdata;
	scmd->result = 0;
	cmd = mvumi_get_cmd(mhba);
	if (unlikely(!cmd)) {
		spin_unlock_irqrestore(shost->host_lock, irq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *) cmd;
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;

out_return_cmd:
	mvumi_return_cmd(mhba, cmd);
	scmd->scsi_done(scmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;
}

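/**
 * mvumi_timed_out -	Block layer timeout handler
 * @scmd:		Timed-out SCSI command
 *
 * Reclaims the tag and DMA mappings of a command the firmware never
 * completed, marks it DID_ABORT and returns BLK_EH_NOT_HANDLED so the
 * midlayer continues its normal timeout handling.
 */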
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
	struct Scsi_Host *host = scmd->device->host;
	struct mvumi_hba *mhba = shost_priv(host);
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);

	if (mhba->tag_cmd[cmd->frame->tag]) {
		mhba->tag_cmd[cmd->frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
	}
	if (!list_empty(&cmd->queue_pointer))
		list_del_init(&cmd->queue_pointer);
	else
		atomic_dec(&mhba->fw_outstanding);

	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
	scmd->SCp.ptr = NULL;
	if (scsi_bufflen(scmd)) {
		if (scsi_sg_count(scmd)) {
			pci_unmap_sg(mhba->pdev,
				scsi_sglist(scmd),
				scsi_sg_count(scmd),
				(int)scmd->sc_data_direction);
		} else {
			pci_unmap_single(mhba->pdev,
				scmd->SCp.dma_handle,
				scsi_bufflen(scmd),
				(int)scmd->sc_data_direction);

			scmd->SCp.dma_handle = 0;
		}
	}
	mvumi_return_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	return BLK_EH_NOT_HANDLED;
}

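/*
 * Fake a classic CHS geometry for the BIOS: 64 heads x 32 sectors for
 * small disks, 255 x 63 once capacity reaches 0x200000 sectors (1 GiB).
 */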
static int
mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int geom[])
{
	int heads, sectors;
	sector_t cylinders;
	unsigned long tmp;

	heads = 64;
	sectors = 32;
	tmp = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, tmp);

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		tmp = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

static struct scsi_host_template mvumi_template = {
	.module = THIS_MODULE,
	.name = "Marvell Storage Controller",
	.slave_configure = mvumi_slave_configure,
	.queuecommand = mvumi_queue_command,
	.eh_host_reset_handler = mvumi_host_reset,
	.bios_param = mvumi_bios_param,
	.this_id = -1,
};

static struct scsi_transport_template mvumi_transport_template = {
	.eh_timed_out = mvumi_timed_out,
};

/**
 * mvumi_init_fw -	Initializes the FW
 * @mhba:		Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static int mvumi_init_fw(struct mvumi_hba *mhba)
{
	int ret = 0;

	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto fail_ioremap;

	mhba->mmio = mhba->base_addr[0];

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->instancet = &mvumi_instance_template;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		mhba->request_id_enabled = 1;
		break;
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
							mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
							mhba->pdev->device);

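	/*
	 * The handshake page is handed to the firmware by physical address
	 * (virt_to_phys below), so it must live in the kernel linear map;
	 * kzalloc() guarantees that.
	 */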
	mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
	if (!mhba->handshake_page) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for handshake\n");
		ret = -ENOMEM;
		goto fail_alloc_mem;
	}
	mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;

fail_ready_state:
	mvumi_release_mem_resource(mhba);
	kfree(mhba->handshake_page);
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}

/**
 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
 * @mhba:		Adapter soft state
 */
static int mvumi_io_attach(struct mvumi_hba *mhba)
{
	struct Scsi_Host *host = mhba->shost;
	int ret;
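	/*
	 * SG entries that fit in one inbound list entry; the +4 appears to
	 * mirror the payload placeholder subtracted in mvumi_send_command().
	 */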
	unsigned int max_sg = (mhba->ib_max_size + 4 -
		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);

	host->irq = mhba->pdev->irq;
	host->unique_id = mhba->unique_id;
	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
	host->max_sectors = mhba->max_transfer_size / 512;
	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->max_id = mhba->max_target_id;
	host->max_cmd_len = MAX_COMMAND_SIZE;
	host->transportt = &mvumi_transport_template;

	ret = scsi_add_host(host, &mhba->pdev->dev);
	if (ret) {
		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
		return ret;
	}
	mhba->fw_flag |= MVUMI_FW_ATTACH;
	scsi_scan_host(host);

	return 0;
}

/**
 * mvumi_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 */
static int __devinit mvumi_probe_one(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;

	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	if (IS_DMA64) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret) {
			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			if (ret)
				goto fail_set_dma_mask;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret)
			goto fail_set_dma_mask;
	}

	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);

	mhba->pdev = pdev;
	mhba->shost = host;
	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}
	mhba->instancet->enable_intr(mhba->mmio);
	pci_set_drvdata(pdev, mhba);

	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;
	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");

	return 0;

fail_io_attach:
	pci_set_drvdata(pdev, NULL);
	mhba->instancet->disable_intr(mhba->mmio);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);

fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}

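/**
 * mvumi_detach_one -	PCI hot removal entry point
 * @pdev:		PCI device structure
 */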
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
	host = mhba->shost;
	scsi_remove_host(mhba->shost);
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba->mmio);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	scsi_host_put(host);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver is removed!\n");
}

/**
 * mvumi_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 */
static void mvumi_shutdown(struct pci_dev *pdev)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);
}

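/**
 * mvumi_suspend -	PM suspend entry point
 * @pdev:		PCI device structure
 * @state:		Target power state
 */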
static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba->mmio);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

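/**
 * mvumi_resume -	PM resume entry point
 * @pdev:		PCI device structure
 *
 * Re-enables the device, restores the DMA masks and BAR mappings, then
 * re-runs the firmware handshake (mvumi_start) before re-arming the IRQ.
 */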
static int mvumi_resume(struct pci_dev *pdev)
{
	int ret;
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "enable device failed\n");
		return ret;
	}
	pci_set_master(pdev);
	if (IS_DMA64) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret) {
			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			if (ret)
				goto fail;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}
	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
	if (ret)
		goto fail;
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto release_regions;

	mhba->mmio = mhba->base_addr[0];
	mvumi_reset(mhba->mmio);

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto unmap_pci_addr;
	}
	mhba->instancet->enable_intr(mhba->mmio);

	return 0;

unmap_pci_addr:
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
release_regions:
	pci_release_regions(pdev);
fail:
	pci_disable_device(pdev);

	return ret;
}

static struct pci_driver mvumi_pci_driver = {
	.name = MV_DRIVER_NAME,
	.id_table = mvumi_pci_table,
	.probe = mvumi_probe_one,
	.remove = __devexit_p(mvumi_detach_one),
	.shutdown = mvumi_shutdown,
#ifdef CONFIG_PM
	.suspend = mvumi_suspend,
	.resume = mvumi_resume,
#endif
};

/**
 * mvumi_init - Driver load entry point
 */
static int __init mvumi_init(void)
{
	return pci_register_driver(&mvumi_pci_driver);
}

/**
 * mvumi_exit - Driver unload entry point
 */
static void __exit mvumi_exit(void)
{
	pci_unregister_driver(&mvumi_pci_driver);
}

module_init(mvumi_init);
module_exit(mvumi_exit);