[SCSI] be2iscsi: Fix warnings from new checkpatch.pl
[pandora-kernel.git] drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29
30 #include <scsi/libiscsi.h>
31 #include <scsi/scsi_transport_iscsi.h>
32 #include <scsi/scsi_transport.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include "be_main.h"
38 #include "be_iscsi.h"
39 #include "be_mgmt.h"
40
41 static unsigned int be_iopoll_budget = 10;
42 static unsigned int be_max_phys_size = 64;
43 static unsigned int enable_msix = 1;
44
45 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
49 module_param(be_iopoll_budget, uint, 0);
50 module_param(enable_msix, uint, 0);
51 module_param(be_max_phys_size, uint, S_IRUGO);
52 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
53                                    "contiguous memory that can be allocated. "
54                                    "Range is 16 - 128");
55
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58         blk_queue_max_segment_size(sdev->request_queue, 65536);
59         return 0;
60 }
61
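/**
 * beiscsi_eh_abort - SCSI eh abort handler
 * @sc: SCSI command to be aborted
 *
 * Builds a single-entry invalidate table for the command's ICD,
 * asks the adapter to invalidate it via mgmt_invalidate_icds(),
 * waits for the MCC completion and then hands the command to
 * iscsi_eh_abort().
 */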
62 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
63 {
64         struct iscsi_cls_session *cls_session;
65         struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
66         struct beiscsi_io_task *aborted_io_task;
67         struct iscsi_conn *conn;
68         struct beiscsi_conn *beiscsi_conn;
69         struct beiscsi_hba *phba;
70         struct iscsi_session *session;
71         struct invalidate_command_table *inv_tbl;
72         unsigned int cid, tag, num_invalidate;
73
74         cls_session = starget_to_session(scsi_target(sc->device));
75         session = cls_session->dd_data;
76
77         spin_lock_bh(&session->lock);
78         if (!aborted_task || !aborted_task->sc) {
79                 /* we raced */
80                 spin_unlock_bh(&session->lock);
81                 return SUCCESS;
82         }
83
84         aborted_io_task = aborted_task->dd_data;
85         if (!aborted_io_task->scsi_cmnd) {
86                 /* raced or invalid command */
87                 spin_unlock_bh(&session->lock);
88                 return SUCCESS;
89         }
90         spin_unlock_bh(&session->lock);
91         conn = aborted_task->conn;
92         beiscsi_conn = conn->dd_data;
93         phba = beiscsi_conn->phba;
94
95         /* invalidate iocb */
96         cid = beiscsi_conn->beiscsi_conn_cid;
97         inv_tbl = phba->inv_tbl;
98         memset(inv_tbl, 0x0, sizeof(*inv_tbl));
99         inv_tbl->cid = cid;
100         inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
101         num_invalidate = 1;
102         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
103         if (!tag) {
104                 shost_printk(KERN_WARNING, phba->shost,
105                              "mgmt_invalidate_icds could not be"
106                              " submitted\n");
107                 return FAILED;
108         } else {
109                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
110                                          phba->ctrl.mcc_numtag[tag]);
111                 free_mcc_tag(&phba->ctrl, tag);
112         }
113
114         return iscsi_eh_abort(sc);
115 }
116
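/**
 * beiscsi_eh_device_reset - SCSI eh LUN reset handler
 * @sc: SCSI command that triggered the reset
 *
 * Gathers the ICDs of the active tasks addressed to the same LUN
 * into the invalidate table, asks the adapter to invalidate them
 * with a single mgmt_invalidate_icds() call and then finishes the
 * reset through iscsi_eh_device_reset().
 */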
117 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
118 {
119         struct iscsi_task *abrt_task;
120         struct beiscsi_io_task *abrt_io_task;
121         struct iscsi_conn *conn;
122         struct beiscsi_conn *beiscsi_conn;
123         struct beiscsi_hba *phba;
124         struct iscsi_session *session;
125         struct iscsi_cls_session *cls_session;
126         struct invalidate_command_table *inv_tbl;
127         unsigned int cid, tag, i, num_invalidate;
128         int rc = FAILED;
129
130         /* invalidate iocbs */
131         cls_session = starget_to_session(scsi_target(sc->device));
132         session = cls_session->dd_data;
133         spin_lock_bh(&session->lock);
134         if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
135                 goto unlock;
136
137         conn = session->leadconn;
138         beiscsi_conn = conn->dd_data;
139         phba = beiscsi_conn->phba;
140         cid = beiscsi_conn->beiscsi_conn_cid;
141         inv_tbl = phba->inv_tbl;
142         memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
143         num_invalidate = 0;
144         for (i = 0; i < conn->session->cmds_max; i++) {
145                 abrt_task = conn->session->cmds[i];
146                 abrt_io_task = abrt_task->dd_data;
147                 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
148                         continue;
149
150                 if (sc->device->lun != abrt_task->sc->device->lun)
151                         continue;
152
153                 inv_tbl->cid = cid;
154                 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
155                 num_invalidate++;
156                 inv_tbl++;
157         }
158         spin_unlock_bh(&session->lock);
159         inv_tbl = phba->inv_tbl;
160
161         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
162         if (!tag) {
163                 shost_printk(KERN_WARNING, phba->shost,
164                              "mgmt_invalidate_icds could not be"
165                              " submitted\n");
166                 return FAILED;
167         } else {
168                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
169                                          phba->ctrl.mcc_numtag[tag]);
170                 free_mcc_tag(&phba->ctrl, tag);
171         }
172
173         return iscsi_eh_device_reset(sc);
174 unlock:
175         spin_unlock_bh(&session->lock);
176         return rc;
177 }
178
179 /*------------------- PCI Driver operations and data ----------------- */
180 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
181         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
182         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
183         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
184         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
185         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
186         { 0 }
187 };
188 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
189
190 static struct scsi_host_template beiscsi_sht = {
191         .module = THIS_MODULE,
192         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
193         .proc_name = DRV_NAME,
194         .queuecommand = iscsi_queuecommand,
195         .change_queue_depth = iscsi_change_queue_depth,
196         .slave_configure = beiscsi_slave_configure,
197         .target_alloc = iscsi_target_alloc,
198         .eh_abort_handler = beiscsi_eh_abort,
199         .eh_device_reset_handler = beiscsi_eh_device_reset,
200         .eh_target_reset_handler = iscsi_eh_session_reset,
201         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
202         .can_queue = BE2_IO_DEPTH,
203         .this_id = -1,
204         .max_sectors = BEISCSI_MAX_SECTORS,
205         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
206         .use_clustering = ENABLE_CLUSTERING,
207 };
208
209 static struct scsi_transport_template *beiscsi_scsi_transport;
210
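/**
 * beiscsi_hba_alloc - allocate the Scsi_Host and driver private data
 * @pcidev: PCI device being probed
 *
 * Allocates an iscsi host with a struct beiscsi_hba private area,
 * fills in the generic host limits and registers the host with the
 * midlayer. Returns the beiscsi_hba on success, NULL on failure.
 */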
211 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
212 {
213         struct beiscsi_hba *phba;
214         struct Scsi_Host *shost;
215
216         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
217         if (!shost) {
218                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
219                         "iscsi_host_alloc failed\n");
220                 return NULL;
221         }
222         shost->dma_boundary = pcidev->dma_mask;
223         shost->max_id = BE2_MAX_SESSIONS;
224         shost->max_channel = 0;
225         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
226         shost->max_lun = BEISCSI_NUM_MAX_LUN;
227         shost->transportt = beiscsi_scsi_transport;
228         phba = iscsi_host_priv(shost);
229         memset(phba, 0, sizeof(*phba));
230         phba->shost = shost;
231         phba->pcidev = pci_dev_get(pcidev);
232         pci_set_drvdata(pcidev, phba);
233
234         if (iscsi_host_add(shost, &phba->pcidev->dev))
235                 goto free_devices;
236         return phba;
237
238 free_devices:
239         pci_dev_put(phba->pcidev);
240         iscsi_host_free(phba->shost);
241         return NULL;
242 }
243
244 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
245 {
246         if (phba->csr_va) {
247                 iounmap(phba->csr_va);
248                 phba->csr_va = NULL;
249         }
250         if (phba->db_va) {
251                 iounmap(phba->db_va);
252                 phba->db_va = NULL;
253         }
254         if (phba->pci_va) {
255                 iounmap(phba->pci_va);
256                 phba->pci_va = NULL;
257         }
258 }
259
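/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and config BARs
 * @phba: driver private structure
 * @pcidev: PCI device
 *
 * Maps BAR 2 (CSR), the first 128K of BAR 4 (doorbells) and the PCI
 * config BAR (BAR 1 on BE_GEN2 adapters, BAR 0 otherwise). On any
 * failure the mappings made so far are undone and -ENOMEM is returned.
 */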
260 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
261                                 struct pci_dev *pcidev)
262 {
263         u8 __iomem *addr;
264         int pcicfg_reg;
265
266         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
267                                pci_resource_len(pcidev, 2));
268         if (addr == NULL)
269                 return -ENOMEM;
270         phba->ctrl.csr = addr;
271         phba->csr_va = addr;
272         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
273
274         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
275         if (addr == NULL)
276                 goto pci_map_err;
277         phba->ctrl.db = addr;
278         phba->db_va = addr;
279         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
280
281         if (phba->generation == BE_GEN2)
282                 pcicfg_reg = 1;
283         else
284                 pcicfg_reg = 0;
285
286         addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
287                                pci_resource_len(pcidev, pcicfg_reg));
288
289         if (addr == NULL)
290                 goto pci_map_err;
291         phba->ctrl.pcicfg = addr;
292         phba->pci_va = addr;
293         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
294         return 0;
295
296 pci_map_err:
297         beiscsi_unmap_pci_function(phba);
298         return -ENOMEM;
299 }
300
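/**
 * beiscsi_enable_pci - enable the PCI device and set the DMA mask
 * @pcidev: PCI device to enable
 *
 * Enables the device, makes it a bus master and selects a 64-bit
 * consistent DMA mask, falling back to 32-bit if that fails.
 */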
301 static int beiscsi_enable_pci(struct pci_dev *pcidev)
302 {
303         int ret;
304
305         ret = pci_enable_device(pcidev);
306         if (ret) {
307                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
308                         "failed\n");
309                 return ret;
310         }
311
312         pci_set_master(pcidev);
313         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
314                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
315                 if (ret) {
316                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
317                         pci_disable_device(pcidev);
318                         return ret;
319                 }
320         }
321         return 0;
322 }
323
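/**
 * be_ctrl_init - set up the adapter control structure
 * @phba: driver private structure
 * @pdev: PCI device
 *
 * Maps the PCI BARs, allocates the mailbox DMA memory (aligned to a
 * 16-byte boundary within the allocation) and initialises the mailbox
 * and MCC locks.
 */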
324 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
325 {
326         struct be_ctrl_info *ctrl = &phba->ctrl;
327         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
328         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
329         int status = 0;
330
331         ctrl->pdev = pdev;
332         status = beiscsi_map_pci_bars(phba, pdev);
333         if (status)
334                 return status;
335         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
336         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
337                                                   mbox_mem_alloc->size,
338                                                   &mbox_mem_alloc->dma);
339         if (!mbox_mem_alloc->va) {
340                 beiscsi_unmap_pci_function(phba);
341                 status = -ENOMEM;
342                 return status;
343         }
344
345         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
346         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
347         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
348         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
349         spin_lock_init(&ctrl->mbox_lock);
350         spin_lock_init(&phba->ctrl.mcc_lock);
351         spin_lock_init(&phba->ctrl.mcc_cq_lock);
352
353         return status;
354 }
355
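/**
 * beiscsi_get_params - derive per-controller resource limits
 * @phba: driver private structure
 *
 * Computes the number of IOs, connections, async PDUs and EQ/CQ
 * entries supported per controller from the ICD and CID counts
 * reported by the firmware.
 */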
356 static void beiscsi_get_params(struct beiscsi_hba *phba)
357 {
358         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
359                                     - (phba->fw_config.iscsi_cid_count
360                                     + BE2_TMFS
361                                     + BE2_NOPOUT_REQ));
362         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
363         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
364         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
365         phba->params.num_sge_per_io = BE2_SGE;
366         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
367         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
368         phba->params.eq_timer = 64;
369         phba->params.num_eq_entries =
370             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
371                                     + BE2_TMFS) / 512) + 1) * 512;
372         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
373                                 ? 1024 : phba->params.num_eq_entries;
374         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
375                              phba->params.num_eq_entries);
376         phba->params.num_cq_entries =
377             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
378                                     + BE2_TMFS) / 512) + 1) * 512;
379         phba->params.wrbs_per_cxn = 256;
380 }
381
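/**
 * hwi_ring_eq_db - ring an event queue doorbell
 * @phba: driver private structure
 * @id: EQ ring id
 * @clr_interrupt: clear the interrupt if set
 * @num_processed: number of EQ entries consumed
 * @rearm: re-arm the EQ if set
 * @event: set the event bit in the doorbell if set
 */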
382 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
383                            unsigned int id, unsigned int clr_interrupt,
384                            unsigned int num_processed,
385                            unsigned char rearm, unsigned char event)
386 {
387         u32 val = 0;
388         val |= id & DB_EQ_RING_ID_MASK;
389         if (rearm)
390                 val |= 1 << DB_EQ_REARM_SHIFT;
391         if (clr_interrupt)
392                 val |= 1 << DB_EQ_CLR_SHIFT;
393         if (event)
394                 val |= 1 << DB_EQ_EVNT_SHIFT;
395         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
396         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
397 }
398
399 /**
400  * be_isr_mcc - MSI-X interrupt handler for the MCC event queue.
401  * @irq: Not used
402  * @dev_id: Pointer to host adapter structure
403  */
404 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
405 {
406         struct beiscsi_hba *phba;
407         struct be_eq_entry *eqe = NULL;
408         struct be_queue_info *eq;
409         struct be_queue_info *mcc;
410         unsigned int num_eq_processed;
411         struct be_eq_obj *pbe_eq;
412         unsigned long flags;
413
414         pbe_eq = dev_id;
415         eq = &pbe_eq->q;
416         phba =  pbe_eq->phba;
417         mcc = &phba->ctrl.mcc_obj.cq;
418         eqe = queue_tail_node(eq);
419         if (!eqe)
420                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
421
422         num_eq_processed = 0;
423
424         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
425                                 & EQE_VALID_MASK) {
426                 if (((eqe->dw[offsetof(struct amap_eq_entry,
427                      resource_id) / 32] &
428                      EQE_RESID_MASK) >> 16) == mcc->id) {
429                         spin_lock_irqsave(&phba->isr_lock, flags);
430                         phba->todo_mcc_cq = 1;
431                         spin_unlock_irqrestore(&phba->isr_lock, flags);
432                 }
433                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
434                 queue_tail_inc(eq);
435                 eqe = queue_tail_node(eq);
436                 num_eq_processed++;
437         }
438         if (phba->todo_mcc_cq)
439                 queue_work(phba->wq, &phba->work_cqs);
440         if (num_eq_processed)
441                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
442
443         return IRQ_HANDLED;
444 }
445
446 /**
447  * be_isr_msix - MSI-X interrupt handler for an I/O event queue.
448  * @irq: Not used
449  * @dev_id: Pointer to host adapter structure
450  */
451 static irqreturn_t be_isr_msix(int irq, void *dev_id)
452 {
453         struct beiscsi_hba *phba;
454         struct be_eq_entry *eqe = NULL;
455         struct be_queue_info *eq;
456         struct be_queue_info *cq;
457         unsigned int num_eq_processed;
458         struct be_eq_obj *pbe_eq;
459         unsigned long flags;
460
461         pbe_eq = dev_id;
462         eq = &pbe_eq->q;
463         cq = pbe_eq->cq;
464         eqe = queue_tail_node(eq);
465         if (!eqe)
466                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
467
468         phba = pbe_eq->phba;
469         num_eq_processed = 0;
470         if (blk_iopoll_enabled) {
471                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
472                                         & EQE_VALID_MASK) {
473                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
474                                 blk_iopoll_sched(&pbe_eq->iopoll);
475
476                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
477                         queue_tail_inc(eq);
478                         eqe = queue_tail_node(eq);
479                         num_eq_processed++;
480                 }
481                 if (num_eq_processed)
482                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
483
484                 return IRQ_HANDLED;
485         } else {
486                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
487                                                 & EQE_VALID_MASK) {
488                         spin_lock_irqsave(&phba->isr_lock, flags);
489                         phba->todo_cq = 1;
490                         spin_unlock_irqrestore(&phba->isr_lock, flags);
491                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
492                         queue_tail_inc(eq);
493                         eqe = queue_tail_node(eq);
494                         num_eq_processed++;
495                 }
496                 if (phba->todo_cq)
497                         queue_work(phba->wq, &phba->work_cqs);
498
499                 if (num_eq_processed)
500                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
501
502                 return IRQ_HANDLED;
503         }
504 }
505
506 /**
507  * be_isr - INTx interrupt handler for both MCC and I/O events.
508  * @irq: Not used
509  * @dev_id: Pointer to host adapter structure
510  */
511 static irqreturn_t be_isr(int irq, void *dev_id)
512 {
513         struct beiscsi_hba *phba;
514         struct hwi_controller *phwi_ctrlr;
515         struct hwi_context_memory *phwi_context;
516         struct be_eq_entry *eqe = NULL;
517         struct be_queue_info *eq;
518         struct be_queue_info *cq;
519         struct be_queue_info *mcc;
520         unsigned long flags, index;
521         unsigned int num_mcceq_processed, num_ioeq_processed;
522         struct be_ctrl_info *ctrl;
523         struct be_eq_obj *pbe_eq;
524         int isr;
525
526         phba = dev_id;
527         ctrl = &phba->ctrl;
528         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
529                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
530         if (!isr)
531                 return IRQ_NONE;
532
533         phwi_ctrlr = phba->phwi_ctrlr;
534         phwi_context = phwi_ctrlr->phwi_ctxt;
535         pbe_eq = &phwi_context->be_eq[0];
536
537         eq = &phwi_context->be_eq[0].q;
538         mcc = &phba->ctrl.mcc_obj.cq;
539         index = 0;
540         eqe = queue_tail_node(eq);
541         if (!eqe)
542                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
543
544         num_ioeq_processed = 0;
545         num_mcceq_processed = 0;
546         if (blk_iopoll_enabled) {
547                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
548                                         & EQE_VALID_MASK) {
549                         if (((eqe->dw[offsetof(struct amap_eq_entry,
550                              resource_id) / 32] &
551                              EQE_RESID_MASK) >> 16) == mcc->id) {
552                                 spin_lock_irqsave(&phba->isr_lock, flags);
553                                 phba->todo_mcc_cq = 1;
554                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
555                                 num_mcceq_processed++;
556                         } else {
557                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
558                                         blk_iopoll_sched(&pbe_eq->iopoll);
559                                 num_ioeq_processed++;
560                         }
561                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
562                         queue_tail_inc(eq);
563                         eqe = queue_tail_node(eq);
564                 }
565                 if (num_ioeq_processed || num_mcceq_processed) {
566                         if (phba->todo_mcc_cq)
567                                 queue_work(phba->wq, &phba->work_cqs);
568
569                         if ((num_mcceq_processed) && (!num_ioeq_processed))
570                                 hwi_ring_eq_db(phba, eq->id, 0,
571                                               (num_ioeq_processed +
572                                                num_mcceq_processed), 1, 1);
573                         else
574                                 hwi_ring_eq_db(phba, eq->id, 0,
575                                                (num_ioeq_processed +
576                                                 num_mcceq_processed), 0, 1);
577
578                         return IRQ_HANDLED;
579                 } else
580                         return IRQ_NONE;
581         } else {
582                 cq = &phwi_context->be_cq[0];
583                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
584                                                 & EQE_VALID_MASK) {
585
586                         if (((eqe->dw[offsetof(struct amap_eq_entry,
587                              resource_id) / 32] &
588                              EQE_RESID_MASK) >> 16) != cq->id) {
589                                 spin_lock_irqsave(&phba->isr_lock, flags);
590                                 phba->todo_mcc_cq = 1;
591                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
592                         } else {
593                                 spin_lock_irqsave(&phba->isr_lock, flags);
594                                 phba->todo_cq = 1;
595                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
596                         }
597                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
598                         queue_tail_inc(eq);
599                         eqe = queue_tail_node(eq);
600                         num_ioeq_processed++;
601                 }
602                 if (phba->todo_cq || phba->todo_mcc_cq)
603                         queue_work(phba->wq, &phba->work_cqs);
604
605                 if (num_ioeq_processed) {
606                         hwi_ring_eq_db(phba, eq->id, 0,
607                                        num_ioeq_processed, 1, 1);
608                         return IRQ_HANDLED;
609                 } else
610                         return IRQ_NONE;
611         }
612 }
613
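/**
 * beiscsi_init_irqs - register the interrupt handlers
 * @phba: driver private structure
 *
 * In MSI-X mode one vector is requested per CPU for the I/O event
 * queues plus one vector for MCC events; otherwise a single shared
 * INTx handler (be_isr) is registered.
 */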
614 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
615 {
616         struct pci_dev *pcidev = phba->pcidev;
617         struct hwi_controller *phwi_ctrlr;
618         struct hwi_context_memory *phwi_context;
619         int ret, msix_vec, i = 0;
620         char desc[32];
621
622         phwi_ctrlr = phba->phwi_ctrlr;
623         phwi_context = phwi_ctrlr->phwi_ctxt;
624
625         if (phba->msix_enabled) {
626                 for (i = 0; i < phba->num_cpus; i++) {
627                         sprintf(desc, "beiscsi_msix_%04x", i);
628                         msix_vec = phba->msix_entries[i].vector;
629                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
630                                           &phwi_context->be_eq[i]);
631                 }
632                 msix_vec = phba->msix_entries[i].vector;
633                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
634                                   &phwi_context->be_eq[i]);
635         } else {
636                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
637                                   "beiscsi", phba);
638                 if (ret) {
639                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
640                                      "Failed to register irq\n");
641                         return ret;
642                 }
643         }
644         return 0;
645 }
646
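/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 * @phba: driver private structure
 * @id: CQ ring id
 * @num_processed: number of CQ entries consumed
 * @rearm: re-arm the CQ if set
 * @event: not used by this doorbell
 */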
647 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
648                            unsigned int id, unsigned int num_processed,
649                            unsigned char rearm, unsigned char event)
650 {
651         u32 val = 0;
652         val |= id & DB_CQ_RING_ID_MASK;
653         if (rearm)
654                 val |= 1 << DB_CQ_REARM_SHIFT;
655         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
656         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
657 }
658
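/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: driver connection
 * @phba: driver private structure
 * @cid: connection id
 * @ppdu: PDU basic header
 * @pdu_len: length of the PDU header
 * @pbuffer: PDU data buffer, if any
 * @buf_len: length of the data buffer
 *
 * Fixes up the ITT for login/text responses and completes the PDU
 * through __iscsi_complete_pdu() under the session lock. Returns 1
 * for unrecognised opcodes, 0 otherwise.
 */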
659 static unsigned int
660 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
661                           struct beiscsi_hba *phba,
662                           unsigned short cid,
663                           struct pdu_base *ppdu,
664                           unsigned long pdu_len,
665                           void *pbuffer, unsigned long buf_len)
666 {
667         struct iscsi_conn *conn = beiscsi_conn->conn;
668         struct iscsi_session *session = conn->session;
669         struct iscsi_task *task;
670         struct beiscsi_io_task *io_task;
671         struct iscsi_hdr *login_hdr;
672
673         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
674                                                 PDUBASE_OPCODE_MASK) {
675         case ISCSI_OP_NOOP_IN:
676                 pbuffer = NULL;
677                 buf_len = 0;
678                 break;
679         case ISCSI_OP_ASYNC_EVENT:
680                 break;
681         case ISCSI_OP_REJECT:
682                 WARN_ON(!pbuffer);
683                 WARN_ON(!(buf_len == 48));
684                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
685                 break;
686         case ISCSI_OP_LOGIN_RSP:
687         case ISCSI_OP_TEXT_RSP:
688                 task = conn->login_task;
689                 io_task = task->dd_data;
690                 login_hdr = (struct iscsi_hdr *)ppdu;
691                 login_hdr->itt = io_task->libiscsi_itt;
692                 break;
693         default:
694                 shost_printk(KERN_WARNING, phba->shost,
695                              "Unrecognized opcode 0x%x in async msg\n",
696                              (ppdu->
697                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
698                                                 & PDUBASE_OPCODE_MASK));
699                 return 1;
700         }
701
702         spin_lock_bh(&session->lock);
703         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
704         spin_unlock_bh(&session->lock);
705         return 0;
706 }
707
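/**
 * alloc_io_sgl_handle - pop an SGL handle from the I/O pool
 * @phba: driver private structure
 *
 * Returns the next free handle from io_sgl_hndl_base and advances
 * the allocation index, or NULL if the pool is exhausted.
 */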
708 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
709 {
710         struct sgl_handle *psgl_handle;
711
712         if (phba->io_sgl_hndl_avbl) {
713                 SE_DEBUG(DBG_LVL_8,
714                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
715                          phba->io_sgl_alloc_index);
716                 psgl_handle = phba->io_sgl_hndl_base[phba->
717                                                 io_sgl_alloc_index];
718                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
719                 phba->io_sgl_hndl_avbl--;
720                 if (phba->io_sgl_alloc_index == (phba->params.
721                                                  ios_per_ctrl - 1))
722                         phba->io_sgl_alloc_index = 0;
723                 else
724                         phba->io_sgl_alloc_index++;
725         } else
726                 psgl_handle = NULL;
727         return psgl_handle;
728 }
729
730 static void
731 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
732 {
733         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
734                  phba->io_sgl_free_index);
735         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
736                 /*
737                  * this can happen if clean_task is called on a task that
738                  * failed in xmit_task or alloc_pdu.
739                  */
740                 SE_DEBUG(DBG_LVL_8,
741                          "Double Free in IO SGL io_sgl_free_index=%d, "
742                          "value there=%p\n", phba->io_sgl_free_index,
743                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
744                 return;
745         }
746         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
747         phba->io_sgl_hndl_avbl++;
748         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
749                 phba->io_sgl_free_index = 0;
750         else
751                 phba->io_sgl_free_index++;
752 }
753
754 /**
755  * alloc_wrb_handle - To allocate a wrb handle
756  * @phba: The hba pointer
757  * @cid: The cid to use for allocation
758  *
759  * This happens under session_lock until submission to chip
760  */
761 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
762 {
763         struct hwi_wrb_context *pwrb_context;
764         struct hwi_controller *phwi_ctrlr;
765         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
766
767         phwi_ctrlr = phba->phwi_ctrlr;
768         pwrb_context = &phwi_ctrlr->wrb_context[cid];
769         if (pwrb_context->wrb_handles_available >= 2) {
770                 pwrb_handle = pwrb_context->pwrb_handle_base[
771                                             pwrb_context->alloc_index];
772                 pwrb_context->wrb_handles_available--;
773                 if (pwrb_context->alloc_index ==
774                                                 (phba->params.wrbs_per_cxn - 1))
775                         pwrb_context->alloc_index = 0;
776                 else
777                         pwrb_context->alloc_index++;
778                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
779                                                 pwrb_context->alloc_index];
780                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
781         } else
782                 pwrb_handle = NULL;
783         return pwrb_handle;
784 }
785
786 /**
787  * free_wrb_handle - To free the wrb handle back to pool
788  * @phba: The hba pointer
789  * @pwrb_context: The context to free from
790  * @pwrb_handle: The wrb_handle to free
791  *
792  * This happens under session_lock until submission to chip
793  */
794 static void
795 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
796                 struct wrb_handle *pwrb_handle)
797 {
798         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
799         pwrb_context->wrb_handles_available++;
800         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
801                 pwrb_context->free_index = 0;
802         else
803                 pwrb_context->free_index++;
804
805         SE_DEBUG(DBG_LVL_8,
806                  "FREE WRB: pwrb_handle=%p free_index=0x%x"
807                  "wrb_handles_available=%d\n",
808                  pwrb_handle, pwrb_context->free_index,
809                  pwrb_context->wrb_handles_available);
810 }
811
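/**
 * alloc_mgmt_sgl_handle - pop an SGL handle from the management pool
 * @phba: driver private structure
 *
 * Returns the next free handle from eh_sgl_hndl_base and advances
 * the allocation index, or NULL if the pool is exhausted.
 */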
812 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
813 {
814         struct sgl_handle *psgl_handle;
815
816         if (phba->eh_sgl_hndl_avbl) {
817                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
818                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
819                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
820                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
821                 phba->eh_sgl_hndl_avbl--;
822                 if (phba->eh_sgl_alloc_index ==
823                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
824                      1))
825                         phba->eh_sgl_alloc_index = 0;
826                 else
827                         phba->eh_sgl_alloc_index++;
828         } else
829                 psgl_handle = NULL;
830         return psgl_handle;
831 }
832
833 void
834 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
835 {
836
837         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
838                              phba->eh_sgl_free_index);
839         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
840                 /*
841                  * this can happen if clean_task is called on a task that
842                  * failed in xmit_task or alloc_pdu.
843                  */
844                 SE_DEBUG(DBG_LVL_8,
845                          "Double Free in eh SGL, eh_sgl_free_index=%d\n",
846                          phba->eh_sgl_free_index);
847                 return;
848         }
849         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
850         phba->eh_sgl_hndl_avbl++;
851         if (phba->eh_sgl_free_index ==
852             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
853                 phba->eh_sgl_free_index = 0;
854         else
855                 phba->eh_sgl_free_index++;
856 }
857
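/**
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: driver connection
 * @task: iscsi task being completed
 * @psol: solicited completion entry
 *
 * Extracts the response, status, flags and residual count from the
 * CQE, copies sense data on a CHECK CONDITION, unmaps the command's
 * scatterlist and completes it via iscsi_complete_scsi_task().
 */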
858 static void
859 be_complete_io(struct beiscsi_conn *beiscsi_conn,
860                struct iscsi_task *task, struct sol_cqe *psol)
861 {
862         struct beiscsi_io_task *io_task = task->dd_data;
863         struct be_status_bhs *sts_bhs =
864                                 (struct be_status_bhs *)io_task->cmd_bhs;
865         struct iscsi_conn *conn = beiscsi_conn->conn;
866         unsigned int sense_len;
867         unsigned char *sense;
868         u32 resid = 0, exp_cmdsn, max_cmdsn;
869         u8 rsp, status, flags;
870
871         exp_cmdsn = (psol->
872                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
873                         & SOL_EXP_CMD_SN_MASK);
874         max_cmdsn = ((psol->
875                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
876                         & SOL_EXP_CMD_SN_MASK) +
877                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
878                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
879         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
880                                                 & SOL_RESP_MASK) >> 16);
881         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
882                                                 & SOL_STS_MASK) >> 8);
883         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
884                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
885
886         task->sc->result = (DID_OK << 16) | status;
887         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
888                 task->sc->result = DID_ERROR << 16;
889                 goto unmap;
890         }
891
892         /* bidi not initially supported */
893         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
894                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
895                                 32] & SOL_RES_CNT_MASK);
896
897                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
898                         task->sc->result = DID_ERROR << 16;
899
900                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
901                         scsi_set_resid(task->sc, resid);
902                         if (!status && (scsi_bufflen(task->sc) - resid <
903                             task->sc->underflow))
904                                 task->sc->result = DID_ERROR << 16;
905                 }
906         }
907
908         if (status == SAM_STAT_CHECK_CONDITION) {
909                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
910                 sense = sts_bhs->sense_info + sizeof(unsigned short);
911                 sense_len = be16_to_cpu(*slen);
912                 memcpy(task->sc->sense_buffer, sense,
913                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
914         }
915
916         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
917                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
918                                                         & SOL_RES_CNT_MASK)
919                          conn->rxdata_octets += (psol->
920                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
921                              & SOL_RES_CNT_MASK);
922         }
923 unmap:
924         scsi_dma_unmap(io_task->scsi_cmnd);
925         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
926 }
927
928 static void
929 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
930                    struct iscsi_task *task, struct sol_cqe *psol)
931 {
932         struct iscsi_logout_rsp *hdr;
933         struct beiscsi_io_task *io_task = task->dd_data;
934         struct iscsi_conn *conn = beiscsi_conn->conn;
935
936         hdr = (struct iscsi_logout_rsp *)task->hdr;
937         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
938         hdr->t2wait = 5;
939         hdr->t2retain = 0;
940         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
941                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
942         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
943                                         32] & SOL_RESP_MASK);
944         hdr->exp_cmdsn = cpu_to_be32(psol->
945                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
946                                         & SOL_EXP_CMD_SN_MASK);
947         hdr->max_cmdsn = be32_to_cpu((psol->
948                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
949                                         & SOL_EXP_CMD_SN_MASK) +
950                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
951                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
952         hdr->dlength[0] = 0;
953         hdr->dlength[1] = 0;
954         hdr->dlength[2] = 0;
955         hdr->hlength = 0;
956         hdr->itt = io_task->libiscsi_itt;
957         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
958 }
959
960 static void
961 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
962                 struct iscsi_task *task, struct sol_cqe *psol)
963 {
964         struct iscsi_tm_rsp *hdr;
965         struct iscsi_conn *conn = beiscsi_conn->conn;
966         struct beiscsi_io_task *io_task = task->dd_data;
967
968         hdr = (struct iscsi_tm_rsp *)task->hdr;
969         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
970         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
971                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
972         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
973                                         32] & SOL_RESP_MASK);
974         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
975                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
976         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
977                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
978                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
979                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
980         hdr->itt = io_task->libiscsi_itt;
981         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
982 }
983
984 static void
985 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
986                        struct beiscsi_hba *phba, struct sol_cqe *psol)
987 {
988         struct hwi_wrb_context *pwrb_context;
989         struct wrb_handle *pwrb_handle = NULL;
990         struct hwi_controller *phwi_ctrlr;
991         struct iscsi_task *task;
992         struct beiscsi_io_task *io_task;
993         struct iscsi_conn *conn = beiscsi_conn->conn;
994         struct iscsi_session *session = conn->session;
995
996         phwi_ctrlr = phba->phwi_ctrlr;
997         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
998                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
999                                 SOL_CID_MASK) >> 6) -
1000                                 phba->fw_config.iscsi_cid_start];
1001         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1002                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1003                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1004         task = pwrb_handle->pio_handle;
1005
1006         io_task = task->dd_data;
1007         spin_lock(&phba->mgmt_sgl_lock);
1008         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1009         spin_unlock(&phba->mgmt_sgl_lock);
1010         spin_lock_bh(&session->lock);
1011         free_wrb_handle(phba, pwrb_context, pwrb_handle);
1012         spin_unlock_bh(&session->lock);
1013 }
1014
1015 static void
1016 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1017                        struct iscsi_task *task, struct sol_cqe *psol)
1018 {
1019         struct iscsi_nopin *hdr;
1020         struct iscsi_conn *conn = beiscsi_conn->conn;
1021         struct beiscsi_io_task *io_task = task->dd_data;
1022
1023         hdr = (struct iscsi_nopin *)task->hdr;
1024         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1025                         & SOL_FLAGS_MASK) >> 24) | 0x80;
1026         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1027                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1028         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1029                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1030                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1031                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1032         hdr->opcode = ISCSI_OP_NOOP_IN;
1033         hdr->itt = io_task->libiscsi_itt;
1034         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1035 }
1036
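/**
 * hwi_complete_cmd - dispatch a solicited CQE by WRB type
 * @beiscsi_conn: driver connection
 * @phba: driver private structure
 * @psol: solicited completion entry
 *
 * Looks up the WRB handle and task for the CQE and, under the
 * session lock, routes the completion to the I/O, logout, TMF or
 * NOP-In handler according to the WRB type.
 */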
1037 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1038                              struct beiscsi_hba *phba, struct sol_cqe *psol)
1039 {
1040         struct hwi_wrb_context *pwrb_context;
1041         struct wrb_handle *pwrb_handle;
1042         struct iscsi_wrb *pwrb = NULL;
1043         struct hwi_controller *phwi_ctrlr;
1044         struct iscsi_task *task;
1045         unsigned int type;
1046         struct iscsi_conn *conn = beiscsi_conn->conn;
1047         struct iscsi_session *session = conn->session;
1048
1049         phwi_ctrlr = phba->phwi_ctrlr;
1050         pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1051                                 (struct amap_sol_cqe, cid) / 32]
1052                                 & SOL_CID_MASK) >> 6) -
1053                                 phba->fw_config.iscsi_cid_start];
1054         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1055                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1056                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1057         task = pwrb_handle->pio_handle;
1058         pwrb = pwrb_handle->pwrb;
1059         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1060                                  WRB_TYPE_MASK) >> 28;
1061
1062         spin_lock_bh(&session->lock);
1063         switch (type) {
1064         case HWH_TYPE_IO:
1065         case HWH_TYPE_IO_RD:
1066                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1067                      ISCSI_OP_NOOP_OUT)
1068                         be_complete_nopin_resp(beiscsi_conn, task, psol);
1069                 else
1070                         be_complete_io(beiscsi_conn, task, psol);
1071                 break;
1072
1073         case HWH_TYPE_LOGOUT:
1074                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1075                         be_complete_logout(beiscsi_conn, task, psol);
1076                 else
1077                         be_complete_tmf(beiscsi_conn, task, psol);
1078
1079                 break;
1080
1081         case HWH_TYPE_LOGIN:
1082                 SE_DEBUG(DBG_LVL_1,
1083                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1084                          "- Solicited path\n");
1085                 break;
1086
1087         case HWH_TYPE_NOP:
1088                 be_complete_nopin_resp(beiscsi_conn, task, psol);
1089                 break;
1090
1091         default:
1092                 shost_printk(KERN_WARNING, phba->shost,
1093                                 "In hwi_complete_cmd, unknown type = %d "
1094                                 "wrb_index 0x%x CID 0x%x\n", type,
1095                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1096                                 wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1097                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1098                                 cid) / 32] & SOL_CID_MASK) >> 6));
1099                 break;
1100         }
1101
1102         spin_unlock_bh(&session->lock);
1103 }
1104
1105 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1106                                           *pasync_ctx, unsigned int is_header,
1107                                           unsigned int host_write_ptr)
1108 {
1109         if (is_header)
1110                 return &pasync_ctx->async_entry[host_write_ptr].
1111                     header_busy_list;
1112         else
1113                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1114 }
1115
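/**
 * hwi_get_async_handle - find the handle for a default PDU CQE
 * @phba: driver private structure
 * @beiscsi_conn: driver connection
 * @pasync_ctx: async PDU context
 * @pdpdu_cqe: default PDU completion entry
 * @pcq_index: returns the CQE index
 *
 * Recovers the buffer physical address from the CQE, locates the
 * matching async_pdu_handle on the header or data busy list and
 * records the CRI, header flag and buffer length in it.
 */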
1116 static struct async_pdu_handle *
1117 hwi_get_async_handle(struct beiscsi_hba *phba,
1118                      struct beiscsi_conn *beiscsi_conn,
1119                      struct hwi_async_pdu_context *pasync_ctx,
1120                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1121 {
1122         struct be_bus_address phys_addr;
1123         struct list_head *pbusy_list;
1124         struct async_pdu_handle *pasync_handle = NULL;
1125         int buffer_len = 0;
1126         unsigned char buffer_index = -1;
1127         unsigned char is_header = 0;
1128
1129         phys_addr.u.a32.address_lo =
1130             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1131             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1132                                                 & PDUCQE_DPL_MASK) >> 16);
1133         phys_addr.u.a32.address_hi =
1134             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1135
1136         phys_addr.u.a64.address =
1137                         *((unsigned long long *)(&phys_addr.u.a64.address));
1138
1139         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1140                         & PDUCQE_CODE_MASK) {
1141         case UNSOL_HDR_NOTIFY:
1142                 is_header = 1;
1143
1144                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1145                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1146                         index) / 32] & PDUCQE_INDEX_MASK));
1147
1148                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1149                                 pasync_ctx->async_header.pa_base.u.a64.address);
1150
1151                 buffer_index = buffer_len /
1152                                 pasync_ctx->async_header.buffer_size;
1153
1154                 break;
1155         case UNSOL_DATA_NOTIFY:
1156                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1157                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1158                                         index) / 32] & PDUCQE_INDEX_MASK));
1159                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1160                                         pasync_ctx->async_data.pa_base.u.
1161                                         a64.address);
1162                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1163                 break;
1164         default:
1165                 pbusy_list = NULL;
1166                 shost_printk(KERN_WARNING, phba->shost,
1167                         "Unexpected code=%d\n",
1168                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1169                                         code) / 32] & PDUCQE_CODE_MASK);
1170                 return NULL;
1171         }
1172
1173         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1174         WARN_ON(list_empty(pbusy_list));
1175         list_for_each_entry(pasync_handle, pbusy_list, link) {
1176                 WARN_ON(pasync_handle->consumed);
1177                 if (pasync_handle->index == buffer_index)
1178                         break;
1179         }
1180
1181         WARN_ON(!pasync_handle);
1182
1183         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1184                                              phba->fw_config.iscsi_cid_start;
1185         pasync_handle->is_header = is_header;
1186         pasync_handle->buffer_len = ((pdpdu_cqe->
1187                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1188                         & PDUCQE_DPL_MASK) >> 16);
1189
1190         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1191                         index) / 32] & PDUCQE_INDEX_MASK);
1192         return pasync_handle;
1193 }
1194
1195 static unsigned int
1196 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1197                            unsigned int is_header, unsigned int cq_index)
1198 {
1199         struct list_head *pbusy_list;
1200         struct async_pdu_handle *pasync_handle;
1201         unsigned int num_entries, writables = 0;
1202         unsigned int *pep_read_ptr, *pwritables;
1203
1204
1205         if (is_header) {
1206                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1207                 pwritables = &pasync_ctx->async_header.writables;
1208                 num_entries = pasync_ctx->async_header.num_entries;
1209         } else {
1210                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1211                 pwritables = &pasync_ctx->async_data.writables;
1212                 num_entries = pasync_ctx->async_data.num_entries;
1213         }
1214
1215         while ((*pep_read_ptr) != cq_index) {
1216                 (*pep_read_ptr)++;
1217                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1218
1219                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1220                                                      *pep_read_ptr);
1221                 if (writables == 0)
1222                         WARN_ON(list_empty(pbusy_list));
1223
1224                 if (!list_empty(pbusy_list)) {
1225                         pasync_handle = list_entry(pbusy_list->next,
1226                                                    struct async_pdu_handle,
1227                                                    link);
1228                         WARN_ON(!pasync_handle);
1229                         pasync_handle->consumed = 1;
1230                 }
1231
1232                 writables++;
1233         }
1234
1235         if (!writables) {
1236                 SE_DEBUG(DBG_LVL_1,
1237                          "Duplicate notification received - index 0x%x!!\n",
1238                          cq_index);
1239                 WARN_ON(1);
1240         }
1241
1242         *pwritables = *pwritables + writables;
1243         return 0;
1244 }
1245
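/**
 * hwi_free_async_msg - return a CRI's async handles to the free lists
 * @phba: driver private structure
 * @cri: connection resource index
 *
 * Moves the handles queued on the CRI's wait queue back to the header
 * and data free lists and resets the wait queue counters.
 */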
1246 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1247                                        unsigned int cri)
1248 {
1249         struct hwi_controller *phwi_ctrlr;
1250         struct hwi_async_pdu_context *pasync_ctx;
1251         struct async_pdu_handle *pasync_handle, *tmp_handle;
1252         struct list_head *plist;
1253         unsigned int i = 0;
1254
1255         phwi_ctrlr = phba->phwi_ctrlr;
1256         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1257
1258         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1259
1260         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1261                 list_del(&pasync_handle->link);
1262
1263                 if (i == 0) {
1264                         list_add_tail(&pasync_handle->link,
1265                                       &pasync_ctx->async_header.free_list);
1266                         pasync_ctx->async_header.free_entries++;
1267                         i++;
1268                 } else {
1269                         list_add_tail(&pasync_handle->link,
1270                                       &pasync_ctx->async_data.free_list);
1271                         pasync_ctx->async_data.free_entries++;
1272                         i++;
1273                 }
1274         }
1275
1276         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1277         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1278         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1279         return 0;
1280 }
1281
1282 static struct phys_addr *
1283 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1284                      unsigned int is_header, unsigned int host_write_ptr)
1285 {
1286         struct phys_addr *pasync_sge = NULL;
1287
1288         if (is_header)
1289                 pasync_sge = pasync_ctx->async_header.ring_base;
1290         else
1291                 pasync_sge = pasync_ctx->async_data.ring_base;
1292
1293         return pasync_sge + host_write_ptr;
1294 }
1295
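/**
 * hwi_post_async_buffers - replenish a default PDU ring
 * @phba: driver private structure
 * @is_header: post to the header ring if set, the data ring otherwise
 *
 * Moves free async handles onto the busy lists in multiples of eight,
 * writes their addresses into the ring SGEs and rings the RXULP0
 * doorbell with the number of entries posted.
 */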
1296 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1297                                    unsigned int is_header)
1298 {
1299         struct hwi_controller *phwi_ctrlr;
1300         struct hwi_async_pdu_context *pasync_ctx;
1301         struct async_pdu_handle *pasync_handle;
1302         struct list_head *pfree_link, *pbusy_list;
1303         struct phys_addr *pasync_sge;
1304         unsigned int ring_id, num_entries;
1305         unsigned int host_write_num;
1306         unsigned int writables;
1307         unsigned int i = 0;
1308         u32 doorbell = 0;
1309
1310         phwi_ctrlr = phba->phwi_ctrlr;
1311         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1312
1313         if (is_header) {
1314                 num_entries = pasync_ctx->async_header.num_entries;
1315                 writables = min(pasync_ctx->async_header.writables,
1316                                 pasync_ctx->async_header.free_entries);
1317                 pfree_link = pasync_ctx->async_header.free_list.next;
1318                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1319                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1320         } else {
1321                 num_entries = pasync_ctx->async_data.num_entries;
1322                 writables = min(pasync_ctx->async_data.writables,
1323                                 pasync_ctx->async_data.free_entries);
1324                 pfree_link = pasync_ctx->async_data.free_list.next;
1325                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1326                 ring_id = phwi_ctrlr->default_pdu_data.id;
1327         }
1328
1329         writables = (writables / 8) * 8;
1330         if (writables) {
1331                 for (i = 0; i < writables; i++) {
1332                         pbusy_list =
1333                             hwi_get_async_busy_list(pasync_ctx, is_header,
1334                                                     host_write_num);
1335                         pasync_handle =
1336                             list_entry(pfree_link, struct async_pdu_handle,
1337                                                                 link);
1338                         WARN_ON(!pasync_handle);
1339                         pasync_handle->consumed = 0;
1340
1341                         pfree_link = pfree_link->next;
1342
1343                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1344                                                 is_header, host_write_num);
1345
1346                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1347                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1348
1349                         list_move(&pasync_handle->link, pbusy_list);
1350
1351                         host_write_num++;
1352                         host_write_num = host_write_num % num_entries;
1353                 }
1354
1355                 if (is_header) {
1356                         pasync_ctx->async_header.host_write_ptr =
1357                                                         host_write_num;
1358                         pasync_ctx->async_header.free_entries -= writables;
1359                         pasync_ctx->async_header.writables -= writables;
1360                         pasync_ctx->async_header.busy_entries += writables;
1361                 } else {
1362                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1363                         pasync_ctx->async_data.free_entries -= writables;
1364                         pasync_ctx->async_data.writables -= writables;
1365                         pasync_ctx->async_data.busy_entries += writables;
1366                 }
1367
1368                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1369                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1370                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1371                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1372                                         << DB_DEF_PDU_CQPROC_SHIFT;
1373
1374                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1375         }
1376 }
1377
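/*
 * Drop a default PDU data buffer that completed with an error (e.g. a
 * digest error): only data-ring CQEs are expected here (BUG_ON otherwise).
 * Account for the consumed entry, free any handles queued for the CRI
 * and repost buffers to the ring.
 */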
1378 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1379                                          struct beiscsi_conn *beiscsi_conn,
1380                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1381 {
1382         struct hwi_controller *phwi_ctrlr;
1383         struct hwi_async_pdu_context *pasync_ctx;
1384         struct async_pdu_handle *pasync_handle = NULL;
1385         unsigned int cq_index = -1;
1386
1387         phwi_ctrlr = phba->phwi_ctrlr;
1388         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1389
1390         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1391                                              pdpdu_cqe, &cq_index);
1392         BUG_ON(pasync_handle->is_header != 0);
1393         if (pasync_handle->consumed == 0)
1394                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1395                                            cq_index);
1396
1397         hwi_free_async_msg(phba, pasync_handle->cri);
1398         hwi_post_async_buffers(phba, pasync_handle->is_header);
1399 }
1400
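/*
 * Gather the header and data fragments queued for @cri into one
 * contiguous buffer (the first data fragment's buffer is reused as the
 * gather target) and hand the PDU to beiscsi_process_async_pdu().
 */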
1401 static unsigned int
1402 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1403                   struct beiscsi_hba *phba,
1404                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1405 {
1406         struct list_head *plist;
1407         struct async_pdu_handle *pasync_handle;
1408         void *phdr = NULL;
1409         unsigned int hdr_len = 0, buf_len = 0;
1410         unsigned int status, index = 0, offset = 0;
1411         void *pfirst_buffer = NULL;
1412         unsigned int num_buf = 0;
1413
1414         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1415
1416         list_for_each_entry(pasync_handle, plist, link) {
1417                 if (index == 0) {
1418                         phdr = pasync_handle->pbuffer;
1419                         hdr_len = pasync_handle->buffer_len;
1420                 } else {
1421                         buf_len = pasync_handle->buffer_len;
1422                         if (!num_buf) {
1423                                 pfirst_buffer = pasync_handle->pbuffer;
1424                                 num_buf++;
1425                         }
1426                         memcpy(pfirst_buffer + offset,
1427                                pasync_handle->pbuffer, buf_len);
1428                         offset += buf_len;
1429                 }
1430                 index++;
1431         }
1432
1433         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1434                                            (beiscsi_conn->beiscsi_conn_cid -
1435                                             phba->fw_config.iscsi_cid_start),
1436                                             phdr, hdr_len, pfirst_buffer,
1437                                             buf_len);
1438
1439         if (status == 0)
1440                 hwi_free_async_msg(phba, cri);
1441         return 0;
1442 }
1443
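/*
 * Queue a completed header or data handle on the per-CRI wait list.
 * For a header, the expected data length is decoded from the BHS
 * (data_len_hi/data_len_lo); once the received byte count reaches it,
 * the PDU is forwarded via hwi_fwd_async_msg().
 */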
1444 static unsigned int
1445 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1446                      struct beiscsi_hba *phba,
1447                      struct async_pdu_handle *pasync_handle)
1448 {
1449         struct hwi_async_pdu_context *pasync_ctx;
1450         struct hwi_controller *phwi_ctrlr;
1451         unsigned int bytes_needed = 0, status = 0;
1452         unsigned short cri = pasync_handle->cri;
1453         struct pdu_base *ppdu;
1454
1455         phwi_ctrlr = phba->phwi_ctrlr;
1456         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1457
1458         list_del(&pasync_handle->link);
1459         if (pasync_handle->is_header) {
1460                 pasync_ctx->async_header.busy_entries--;
1461                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1462                         hwi_free_async_msg(phba, cri);
1463                         BUG();
1464                 }
1465
1466                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1467                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1468                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1469                                 (unsigned short)pasync_handle->buffer_len;
1470                 list_add_tail(&pasync_handle->link,
1471                               &pasync_ctx->async_entry[cri].wait_queue.list);
1472
1473                 ppdu = pasync_handle->pbuffer;
1474                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1475                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1476                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1477                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1478                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1479
1480                 if (status == 0) {
1481                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1482                             bytes_needed;
1483
1484                         if (bytes_needed == 0)
1485                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1486                                                            pasync_ctx, cri);
1487                 }
1488         } else {
1489                 pasync_ctx->async_data.busy_entries--;
1490                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1491                         list_add_tail(&pasync_handle->link,
1492                                       &pasync_ctx->async_entry[cri].wait_queue.
1493                                       list);
1494                         pasync_ctx->async_entry[cri].wait_queue.
1495                                 bytes_received +=
1496                                 (unsigned short)pasync_handle->buffer_len;
1497
1498                         if (pasync_ctx->async_entry[cri].wait_queue.
1499                             bytes_received >=
1500                             pasync_ctx->async_entry[cri].wait_queue.
1501                             bytes_needed)
1502                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1503                                                            pasync_ctx, cri);
1504                 }
1505         }
1506         return status;
1507 }
1508
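/*
 * Completion handler for UNSOL_HDR/UNSOL_DATA notifications: look up the
 * handle for this CQE, update the ring writables if it was not already
 * consumed, gather the PDU and repost buffers to the ring.
 */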
1509 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1510                                          struct beiscsi_hba *phba,
1511                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1512 {
1513         struct hwi_controller *phwi_ctrlr;
1514         struct hwi_async_pdu_context *pasync_ctx;
1515         struct async_pdu_handle *pasync_handle = NULL;
1516         unsigned int cq_index = -1;
1517
1518         phwi_ctrlr = phba->phwi_ctrlr;
1519         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1520         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1521                                              pdpdu_cqe, &cq_index);
1522
1523         if (pasync_handle->consumed == 0)
1524                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1525                                            cq_index);
1526         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1527         hwi_post_async_buffers(phba, pasync_handle->is_header);
1528 }
1529
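/*
 * Drain the MCC completion queue: async entries are checked for link
 * state events, regular completions are handed to the MCC layer, and the
 * CQ doorbell is rung every 32 entries and re-armed once at the end.
 */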
1530 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1531 {
1532         struct be_queue_info *mcc_cq;
1533         struct  be_mcc_compl *mcc_compl;
1534         unsigned int num_processed = 0;
1535
1536         mcc_cq = &phba->ctrl.mcc_obj.cq;
1537         mcc_compl = queue_tail_node(mcc_cq);
1538         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1539         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1540
1541                 if (num_processed >= 32) {
1542                         hwi_ring_cq_db(phba, mcc_cq->id,
1543                                         num_processed, 0, 0);
1544                         num_processed = 0;
1545                 }
1546                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1547                         /* Interpret flags as an async trailer */
1548                         if (is_link_state_evt(mcc_compl->flags))
1549                                 /* Interpret compl as a async link evt */
1550                                 beiscsi_async_link_state_process(phba,
1551                                 (struct be_async_event_link_state *) mcc_compl);
1552                         else
1553                                 SE_DEBUG(DBG_LVL_1,
1554                                         " Unsupported Async Event, flags"
1555                                         " = 0x%08x\n", mcc_compl->flags);
1556                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1557                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1558                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1559                 }
1560
1561                 mcc_compl->flags = 0;
1562                 queue_tail_inc(mcc_cq);
1563                 mcc_compl = queue_tail_node(mcc_cq);
1564                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1565                 num_processed++;
1566         }
1567
1568         if (num_processed > 0)
1569                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1570
1571 }
1572
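/*
 * Drain the I/O completion queue bound to this EQ.  Each valid CQE is
 * dispatched on its completion code (solicited command completion,
 * driver message, unsolicited header/data, or one of the error codes
 * that tear down the connection).  The CQ doorbell is rung every 32
 * entries and re-armed once at the end; returns the number of CQEs
 * processed.
 */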
1573 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1574 {
1575         struct be_queue_info *cq;
1576         struct sol_cqe *sol;
1577         struct dmsg_cqe *dmsg;
1578         unsigned int num_processed = 0;
1579         unsigned int tot_nump = 0;
1580         struct beiscsi_conn *beiscsi_conn;
1581         struct beiscsi_endpoint *beiscsi_ep;
1582         struct iscsi_endpoint *ep;
1583         struct beiscsi_hba *phba;
1584
1585         cq = pbe_eq->cq;
1586         sol = queue_tail_node(cq);
1587         phba = pbe_eq->phba;
1588
1589         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1590                CQE_VALID_MASK) {
1591                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1592
1593                 ep = phba->ep_array[(u32) ((sol->
1594                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1595                                    SOL_CID_MASK) >> 6) -
1596                                    phba->fw_config.iscsi_cid_start];
1597
1598                 beiscsi_ep = ep->dd_data;
1599                 beiscsi_conn = beiscsi_ep->conn;
1600
1601                 if (num_processed >= 32) {
1602                         hwi_ring_cq_db(phba, cq->id,
1603                                         num_processed, 0, 0);
1604                         tot_nump += num_processed;
1605                         num_processed = 0;
1606                 }
1607
1608                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1609                         32] & CQE_CODE_MASK) {
1610                 case SOL_CMD_COMPLETE:
1611                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1612                         break;
1613                 case DRIVERMSG_NOTIFY:
1614                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1615                         dmsg = (struct dmsg_cqe *)sol;
1616                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1617                         break;
1618                 case UNSOL_HDR_NOTIFY:
1619                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1620                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1621                                              (struct i_t_dpdu_cqe *)sol);
1622                         break;
1623                 case UNSOL_DATA_NOTIFY:
1624                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1625                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1626                                              (struct i_t_dpdu_cqe *)sol);
1627                         break;
1628                 case CXN_INVALIDATE_INDEX_NOTIFY:
1629                 case CMD_INVALIDATED_NOTIFY:
1630                 case CXN_INVALIDATE_NOTIFY:
1631                         SE_DEBUG(DBG_LVL_1,
1632                                  "Ignoring CQ Error notification for cmd/cxn "
1633                                  "invalidate\n");
1634                         break;
1635                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1636                 case CMD_KILLED_INVALID_STATSN_RCVD:
1637                 case CMD_KILLED_INVALID_R2T_RCVD:
1638                 case CMD_CXN_KILLED_LUN_INVALID:
1639                 case CMD_CXN_KILLED_ICD_INVALID:
1640                 case CMD_CXN_KILLED_ITT_INVALID:
1641                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1642                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1643                         SE_DEBUG(DBG_LVL_1,
1644                                  "CQ Error notification for cmd, "
1645                                  "code %d cid 0x%x\n",
1646                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1647                                  32] & CQE_CODE_MASK,
1648                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1649                                  32] & SOL_CID_MASK));
1650                         break;
1651                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1652                         SE_DEBUG(DBG_LVL_1,
1653                                  "Digest error on def pdu ring, dropping..\n");
1654                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1655                                              (struct i_t_dpdu_cqe *) sol);
1656                         break;
1657                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1658                 case CXN_KILLED_BURST_LEN_MISMATCH:
1659                 case CXN_KILLED_AHS_RCVD:
1660                 case CXN_KILLED_HDR_DIGEST_ERR:
1661                 case CXN_KILLED_UNKNOWN_HDR:
1662                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1663                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1664                 case CXN_KILLED_TIMED_OUT:
1665                 case CXN_KILLED_FIN_RCVD:
1666                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1667                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1668                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1669                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1670                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1671                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1672                                  "0x%x...\n",
1673                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1674                                  32] & CQE_CODE_MASK,
1675                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1676                                  32] & CQE_CID_MASK));
1677                         iscsi_conn_failure(beiscsi_conn->conn,
1678                                            ISCSI_ERR_CONN_FAILED);
1679                         break;
1680                 case CXN_KILLED_RST_SENT:
1681                 case CXN_KILLED_RST_RCVD:
1682                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1683                                 "received/sent on CID 0x%x...\n",
1684                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1685                                  32] & CQE_CODE_MASK,
1686                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1687                                  32] & CQE_CID_MASK));
1688                         iscsi_conn_failure(beiscsi_conn->conn,
1689                                            ISCSI_ERR_CONN_FAILED);
1690                         break;
1691                 default:
1692                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code %d "
1693                                  "received on CID 0x%x...\n",
1694                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1695                                  32] & CQE_CODE_MASK,
1696                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1697                                  32] & CQE_CID_MASK));
1698                         break;
1699                 }
1700
1701                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1702                 queue_tail_inc(cq);
1703                 sol = queue_tail_node(cq);
1704                 num_processed++;
1705         }
1706
1707         if (num_processed > 0) {
1708                 tot_nump += num_processed;
1709                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1710         }
1711         return tot_nump;
1712 }
1713
1714 void beiscsi_process_all_cqs(struct work_struct *work)
1715 {
1716         unsigned long flags;
1717         struct hwi_controller *phwi_ctrlr;
1718         struct hwi_context_memory *phwi_context;
1719         struct be_eq_obj *pbe_eq;
1720         struct beiscsi_hba *phba =
1721             container_of(work, struct beiscsi_hba, work_cqs);
1722
1723         phwi_ctrlr = phba->phwi_ctrlr;
1724         phwi_context = phwi_ctrlr->phwi_ctxt;
1725         if (phba->msix_enabled)
1726                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1727         else
1728                 pbe_eq = &phwi_context->be_eq[0];
1729
1730         if (phba->todo_mcc_cq) {
1731                 spin_lock_irqsave(&phba->isr_lock, flags);
1732                 phba->todo_mcc_cq = 0;
1733                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1734                 beiscsi_process_mcc_isr(phba);
1735         }
1736
1737         if (phba->todo_cq) {
1738                 spin_lock_irqsave(&phba->isr_lock, flags);
1739                 phba->todo_cq = 0;
1740                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1741                 beiscsi_process_cq(pbe_eq);
1742         }
1743 }
1744
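/*
 * blk_iopoll handler: process completions on this EQ's CQ and, once
 * fewer entries than the budget are consumed, complete the poll and
 * re-arm the event queue.  Each EQ may be polled concurrently, so ret
 * is an ordinary per-call local.
 */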
1745 static int be_iopoll(struct blk_iopoll *iop, int budget)
1746 {
1747         unsigned int ret;
1748         struct beiscsi_hba *phba;
1749         struct be_eq_obj *pbe_eq;
1750
1751         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1752         ret = beiscsi_process_cq(pbe_eq);
1753         if (ret < budget) {
1754                 phba = pbe_eq->phba;
1755                 blk_iopoll_complete(iop);
1756                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1757                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1758         }
1759         return ret;
1760 }
1761
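/*
 * Program the WRB with the BHS address and up to two inline SGEs, then
 * fill the SGL fragment: the first SGE describes the BHS, the data SGEs
 * follow from the third slot onwards, and the final SGE is flagged as
 * the last one.
 */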
1762 static void
1763 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1764               unsigned int num_sg, struct beiscsi_io_task *io_task)
1765 {
1766         struct iscsi_sge *psgl;
1767         unsigned short sg_len, index;
1768         unsigned int sge_len = 0;
1769         unsigned long long addr;
1770         struct scatterlist *l_sg;
1771         unsigned int offset;
1772
1773         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1774                                       io_task->bhs_pa.u.a32.address_lo);
1775         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1776                                       io_task->bhs_pa.u.a32.address_hi);
1777
1778         l_sg = sg;
1779         for (index = 0; (index < num_sg) && (index < 2); index++,
1780                                                          sg = sg_next(sg)) {
1781                 if (index == 0) {
1782                         sg_len = sg_dma_len(sg);
1783                         addr = (u64) sg_dma_address(sg);
1784                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1785                                                 ((u32)(addr & 0xFFFFFFFF)));
1786                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1787                                                         ((u32)(addr >> 32)));
1788                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1789                                                         sg_len);
1790                         sge_len = sg_len;
1791                 } else {
1792                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1793                                                         pwrb, sge_len);
1794                         sg_len = sg_dma_len(sg);
1795                         addr = (u64) sg_dma_address(sg);
1796                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1797                                                 ((u32)(addr & 0xFFFFFFFF)));
1798                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1799                                                         ((u32)(addr >> 32)));
1800                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1801                                                         sg_len);
1802                 }
1803         }
1804         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1805         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1806
1807         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1808
1809         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1810                         io_task->bhs_pa.u.a32.address_hi);
1811         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1812                         io_task->bhs_pa.u.a32.address_lo);
1813
1814         if (num_sg == 1) {
1815                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1816                                                                 1);
1817                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1818                                                                 0);
1819         } else if (num_sg == 2) {
1820                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1821                                                                 0);
1822                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1823                                                                 1);
1824         } else {
1825                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1826                                                                 0);
1827                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1828                                                                 0);
1829         }
1830         sg = l_sg;
1831         psgl++;
1832         psgl++;
1833         offset = 0;
1834         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1835                 sg_len = sg_dma_len(sg);
1836                 addr = (u64) sg_dma_address(sg);
1837                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1838                                                 (addr & 0xFFFFFFFF));
1839                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1840                                                 (addr >> 32));
1841                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1842                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1843                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1844                 offset += sg_len;
1845         }
1846         psgl--;
1847         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1848 }
1849
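/*
 * Build the WRB and SGL for a non-I/O (management) task: the BHS is
 * always described, and if the task carries immediate data it is mapped
 * with pci_map_single() (the hard-coded direction 1 is PCI_DMA_TODEVICE)
 * and programmed as SGE0.
 */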
1850 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1851 {
1852         struct iscsi_sge *psgl;
1853         unsigned long long addr;
1854         struct beiscsi_io_task *io_task = task->dd_data;
1855         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1856         struct beiscsi_hba *phba = beiscsi_conn->phba;
1857
1858         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1859         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1860                                 io_task->bhs_pa.u.a32.address_lo);
1861         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1862                                 io_task->bhs_pa.u.a32.address_hi);
1863
1864         if (task->data) {
1865                 if (task->data_count) {
1866                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1867                         addr = (u64) pci_map_single(phba->pcidev,
1868                                                     task->data,
1869                                                     task->data_count, 1);
1870                 } else {
1871                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1872                         addr = 0;
1873                 }
1874                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1875                                                 ((u32)(addr & 0xFFFFFFFF)));
1876                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1877                                                 ((u32)(addr >> 32)));
1878                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1879                                                 task->data_count);
1880
1881                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1882         } else {
1883                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1884                 addr = 0;
1885         }
1886
1887         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1888
1889         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1890
1891         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1892                       io_task->bhs_pa.u.a32.address_hi);
1893         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1894                       io_task->bhs_pa.u.a32.address_lo);
1895         if (task->data) {
1896                 psgl++;
1897                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1898                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1899                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1900                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1901                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1902                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1903
1904                 psgl++;
1905                 if (task->data) {
1906                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1907                                                 ((u32)(addr & 0xFFFFFFFF)));
1908                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1909                                                 ((u32)(addr >> 32)));
1910                 }
1911                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1912         }
1913         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1914 }
1915
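/*
 * Work out, per memory region, how many bytes the driver needs (WRBs,
 * WRB/SGL handles, default PDU buffers and rings, async PDU context) and
 * record the totals in phba->mem_req[] for beiscsi_alloc_mem().
 */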
1916 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1917 {
1918         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1919         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1920         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1921
1922         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
1923                                       sizeof(struct sol_cqe));
1924         num_async_pdu_buf_pages =
1925                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
1926                                        phba->params.defpdu_hdr_sz);
1927         num_async_pdu_buf_sgl_pages =
1928                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
1929                                        sizeof(struct phys_addr));
1930         num_async_pdu_data_pages =
1931                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
1932                                        phba->params.defpdu_data_sz);
1933         num_async_pdu_data_sgl_pages =
1934                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
1935                                        sizeof(struct phys_addr));
1936
1937         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1938
1939         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1940                                                  BE_ISCSI_PDU_HEADER_SIZE;
1941         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1942                                             sizeof(struct hwi_context_memory);
1943
1944
1945         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1946             * (phba->params.wrbs_per_cxn)
1947             * phba->params.cxns_per_ctrl;
1948         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1949                                  (phba->params.wrbs_per_cxn);
1950         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1951                                 phba->params.cxns_per_ctrl);
1952
1953         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1954                 phba->params.icds_per_ctrl;
1955         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1956                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1957
1958         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1959                 num_async_pdu_buf_pages * PAGE_SIZE;
1960         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1961                 num_async_pdu_data_pages * PAGE_SIZE;
1962         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1963                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1964         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1965                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1966         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1967                 phba->params.asyncpdus_per_ctrl *
1968                 sizeof(struct async_pdu_handle);
1969         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1970                 phba->params.asyncpdus_per_ctrl *
1971                 sizeof(struct async_pdu_handle);
1972         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1973                 sizeof(struct hwi_async_pdu_context) +
1974                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1975 }
1976
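/*
 * Allocate each region recorded in phba->mem_req[] as one or more
 * DMA-coherent chunks (at most be_max_phys_size KB each); on allocation
 * failure the chunk size is reduced until it reaches BE_MIN_MEM_SIZE,
 * after which everything allocated so far is unwound.
 */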
1977 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1978 {
1979         struct be_mem_descriptor *mem_descr;
1980         dma_addr_t bus_add;
1981         struct mem_array *mem_arr, *mem_arr_orig;
1982         unsigned int i, j, alloc_size, curr_alloc_size;
1983
1984         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1985         if (!phba->phwi_ctrlr)
1986                 return -ENOMEM;
1987
1988         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1989                                  GFP_KERNEL);
1990         if (!phba->init_mem) {
1991                 kfree(phba->phwi_ctrlr);
1992                 return -ENOMEM;
1993         }
1994
1995         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1996                                GFP_KERNEL);
1997         if (!mem_arr_orig) {
1998                 kfree(phba->init_mem);
1999                 kfree(phba->phwi_ctrlr);
2000                 return -ENOMEM;
2001         }
2002
2003         mem_descr = phba->init_mem;
2004         for (i = 0; i < SE_MEM_MAX; i++) {
2005                 j = 0;
2006                 mem_arr = mem_arr_orig;
2007                 alloc_size = phba->mem_req[i];
2008                 memset(mem_arr, 0, sizeof(struct mem_array) *
2009                        BEISCSI_MAX_FRAGS_INIT);
2010                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2011                 do {
2012                         mem_arr->virtual_address = pci_alloc_consistent(
2013                                                         phba->pcidev,
2014                                                         curr_alloc_size,
2015                                                         &bus_add);
2016                         if (!mem_arr->virtual_address) {
2017                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2018                                         goto free_mem;
2019                                 if (curr_alloc_size -
2020                                         rounddown_pow_of_two(curr_alloc_size))
2021                                         curr_alloc_size = rounddown_pow_of_two
2022                                                              (curr_alloc_size);
2023                                 else
2024                                         curr_alloc_size = curr_alloc_size / 2;
2025                         } else {
2026                                 mem_arr->bus_address.u.
2027                                     a64.address = (__u64) bus_add;
2028                                 mem_arr->size = curr_alloc_size;
2029                                 alloc_size -= curr_alloc_size;
2030                                 curr_alloc_size = min(be_max_phys_size *
2031                                                       1024, alloc_size);
2032                                 j++;
2033                                 mem_arr++;
2034                         }
2035                 } while (alloc_size);
2036                 mem_descr->num_elements = j;
2037                 mem_descr->size_in_bytes = phba->mem_req[i];
2038                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2039                                                GFP_KERNEL);
2040                 if (!mem_descr->mem_array)
2041                         goto free_mem;
2042
2043                 memcpy(mem_descr->mem_array, mem_arr_orig,
2044                        sizeof(struct mem_array) * j);
2045                 mem_descr++;
2046         }
2047         kfree(mem_arr_orig);
2048         return 0;
2049 free_mem:
2050         mem_descr->num_elements = j;
2051         while ((i) || (j)) {
2052                 for (j = mem_descr->num_elements; j > 0; j--) {
2053                         pci_free_consistent(phba->pcidev,
2054                                             mem_descr->mem_array[j - 1].size,
2055                                             mem_descr->mem_array[j - 1].
2056                                             virtual_address,
2057                                             (unsigned long)mem_descr->
2058                                             mem_array[j - 1].
2059                                             bus_address.u.a64.address);
2060                 }
2061                 if (i) {
2062                         i--;
2063                         kfree(mem_descr->mem_array);
2064                         mem_descr--;
2065                 }
2066         }
2067         kfree(mem_arr_orig);
2068         kfree(phba->init_mem);
2069         kfree(phba->phwi_ctrlr);
2070         return -ENOMEM;
2071 }
2072
2073 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2074 {
2075         beiscsi_find_mem_req(phba);
2076         return beiscsi_alloc_mem(phba);
2077 }
2078
2079 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2080 {
2081         struct pdu_data_out *pdata_out;
2082         struct pdu_nop_out *pnop_out;
2083         struct be_mem_descriptor *mem_descr;
2084
2085         mem_descr = phba->init_mem;
2086         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2087         pdata_out =
2088             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2089         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2090
2091         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2092                       IIOC_SCSI_DATA);
2093
2094         pnop_out =
2095             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2096                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2097
2098         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2099         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2100         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2101         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2102 }
2103
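/*
 * Carve the WRB handle and WRB arrays out of init_mem and wire them into
 * each connection's wrb_context.  Note that the kzalloc()ed handle
 * pointer arrays are not checked for failure here.
 */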
2104 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2105 {
2106         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2107         struct wrb_handle *pwrb_handle;
2108         struct hwi_controller *phwi_ctrlr;
2109         struct hwi_wrb_context *pwrb_context;
2110         struct iscsi_wrb *pwrb;
2111         unsigned int num_cxn_wrbh;
2112         unsigned int num_cxn_wrb, j, idx, index;
2113
2114         mem_descr_wrbh = phba->init_mem;
2115         mem_descr_wrbh += HWI_MEM_WRBH;
2116
2117         mem_descr_wrb = phba->init_mem;
2118         mem_descr_wrb += HWI_MEM_WRB;
2119
2120         idx = 0;
2121         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2122         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2123                         ((sizeof(struct wrb_handle)) *
2124                          phba->params.wrbs_per_cxn));
2125         phwi_ctrlr = phba->phwi_ctrlr;
2126
2127         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2128                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2129                 pwrb_context->pwrb_handle_base =
2130                                 kzalloc(sizeof(struct wrb_handle *) *
2131                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2132                 pwrb_context->pwrb_handle_basestd =
2133                                 kzalloc(sizeof(struct wrb_handle *) *
2134                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2135                 if (num_cxn_wrbh) {
2136                         pwrb_context->alloc_index = 0;
2137                         pwrb_context->wrb_handles_available = 0;
2138                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2139                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2140                                 pwrb_context->pwrb_handle_basestd[j] =
2141                                                                 pwrb_handle;
2142                                 pwrb_context->wrb_handles_available++;
2143                                 pwrb_handle->wrb_index = j;
2144                                 pwrb_handle++;
2145                         }
2146                         pwrb_context->free_index = 0;
2147                         num_cxn_wrbh--;
2148                 } else {
2149                         idx++;
2150                         pwrb_handle =
2151                             mem_descr_wrbh->mem_array[idx].virtual_address;
2152                         num_cxn_wrbh =
2153                             ((mem_descr_wrbh->mem_array[idx].size) /
2154                              ((sizeof(struct wrb_handle)) *
2155                               phba->params.wrbs_per_cxn));
2156                         pwrb_context->alloc_index = 0;
2157                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2158                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2159                                 pwrb_context->pwrb_handle_basestd[j] =
2160                                     pwrb_handle;
2161                                 pwrb_context->wrb_handles_available++;
2162                                 pwrb_handle->wrb_index = j;
2163                                 pwrb_handle++;
2164                         }
2165                         pwrb_context->free_index = 0;
2166                         num_cxn_wrbh--;
2167                 }
2168         }
2169         idx = 0;
2170         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2171         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2172                       ((sizeof(struct iscsi_wrb) *
2173                         phba->params.wrbs_per_cxn));
2174         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2175                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2176                 if (num_cxn_wrb) {
2177                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2178                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2179                                 pwrb_handle->pwrb = pwrb;
2180                                 pwrb++;
2181                         }
2182                         num_cxn_wrb--;
2183                 } else {
2184                         idx++;
2185                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2186                         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2187                                       ((sizeof(struct iscsi_wrb) *
2188                                         phba->params.wrbs_per_cxn));
2189                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2190                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2191                                 pwrb_handle->pwrb = pwrb;
2192                                 pwrb++;
2193                         }
2194                         num_cxn_wrb--;
2195                 }
2196         }
2197 }
2198
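/*
 * Initialise the async (default) PDU context from init_mem: set up the
 * header and data buffer pools, rings and handle arrays, and place every
 * handle on the corresponding free list.
 */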
2199 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2200 {
2201         struct hwi_controller *phwi_ctrlr;
2202         struct hba_parameters *p = &phba->params;
2203         struct hwi_async_pdu_context *pasync_ctx;
2204         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2205         unsigned int index;
2206         struct be_mem_descriptor *mem_descr;
2207
2208         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2209         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2210
2211         phwi_ctrlr = phba->phwi_ctrlr;
2212         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2213                                 mem_descr->mem_array[0].virtual_address;
2214         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2215         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2216
2217         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2218         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2219         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2220         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2221
2222         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2223         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2224         if (mem_descr->mem_array[0].virtual_address) {
2225                 SE_DEBUG(DBG_LVL_8,
2226                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF "
2227                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2228         } else
2229                 shost_printk(KERN_WARNING, phba->shost,
2230                              "No Virtual address\n");
2231
2232         pasync_ctx->async_header.va_base =
2233                         mem_descr->mem_array[0].virtual_address;
2234
2235         pasync_ctx->async_header.pa_base.u.a64.address =
2236                         mem_descr->mem_array[0].bus_address.u.a64.address;
2237
2238         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2239         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2240         if (mem_descr->mem_array[0].virtual_address) {
2241                 SE_DEBUG(DBG_LVL_8,
2242                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING "
2243                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2244         } else
2245                 shost_printk(KERN_WARNING, phba->shost,
2246                             "No Virtual address\n");
2247         pasync_ctx->async_header.ring_base =
2248                         mem_descr->mem_array[0].virtual_address;
2249
2250         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2251         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2252         if (mem_descr->mem_array[0].virtual_address) {
2253                 SE_DEBUG(DBG_LVL_8,
2254                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE "
2255                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2256         } else
2257                 shost_printk(KERN_WARNING, phba->shost,
2258                             "No Virtual address\n");
2259
2260         pasync_ctx->async_header.handle_base =
2261                         mem_descr->mem_array[0].virtual_address;
2262         pasync_ctx->async_header.writables = 0;
2263         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2264
2265         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2266         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2267         if (mem_descr->mem_array[0].virtual_address) {
2268                 SE_DEBUG(DBG_LVL_8,
2269                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF "
2270                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2271         } else
2272                 shost_printk(KERN_WARNING, phba->shost,
2273                             "No Virtual address\n");
2274         pasync_ctx->async_data.va_base =
2275                         mem_descr->mem_array[0].virtual_address;
2276         pasync_ctx->async_data.pa_base.u.a64.address =
2277                         mem_descr->mem_array[0].bus_address.u.a64.address;
2278
2279         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2280         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2281         if (mem_descr->mem_array[0].virtual_address) {
2282                 SE_DEBUG(DBG_LVL_8,
2283                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING "
2284                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2285         } else
2286                 shost_printk(KERN_WARNING, phba->shost,
2287                              "No Virtual address\n");
2288
2289         pasync_ctx->async_data.ring_base =
2290                         mem_descr->mem_array[0].virtual_address;
2291
2292         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2293         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2294         if (!mem_descr->mem_array[0].virtual_address)
2295                 shost_printk(KERN_WARNING, phba->shost,
2296                             "No Virtual address\n");
2297
2298         pasync_ctx->async_data.handle_base =
2299                         mem_descr->mem_array[0].virtual_address;
2300         pasync_ctx->async_data.writables = 0;
2301         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2302
2303         pasync_header_h =
2304                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2305         pasync_data_h =
2306                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2307
2308         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2309                 pasync_header_h->cri = -1;
2310                 pasync_header_h->index = (char)index;
2311                 INIT_LIST_HEAD(&pasync_header_h->link);
2312                 pasync_header_h->pbuffer =
2313                         (void *)((unsigned long)
2314                         (pasync_ctx->async_header.va_base) +
2315                         (p->defpdu_hdr_sz * index));
2316
2317                 pasync_header_h->pa.u.a64.address =
2318                         pasync_ctx->async_header.pa_base.u.a64.address +
2319                         (p->defpdu_hdr_sz * index);
2320
2321                 list_add_tail(&pasync_header_h->link,
2322                                 &pasync_ctx->async_header.free_list);
2323                 pasync_header_h++;
2324                 pasync_ctx->async_header.free_entries++;
2325                 pasync_ctx->async_header.writables++;
2326
2327                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2328                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2329                                header_busy_list);
2330                 pasync_data_h->cri = -1;
2331                 pasync_data_h->index = (char)index;
2332                 INIT_LIST_HEAD(&pasync_data_h->link);
2333                 pasync_data_h->pbuffer =
2334                         (void *)((unsigned long)
2335                         (pasync_ctx->async_data.va_base) +
2336                         (p->defpdu_data_sz * index));
2337
2338                 pasync_data_h->pa.u.a64.address =
2339                     pasync_ctx->async_data.pa_base.u.a64.address +
2340                     (p->defpdu_data_sz * index);
2341
2342                 list_add_tail(&pasync_data_h->link,
2343                               &pasync_ctx->async_data.free_list);
2344                 pasync_data_h++;
2345                 pasync_ctx->async_data.free_entries++;
2346                 pasync_ctx->async_data.writables++;
2347
2348                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2349         }
2350
2351         pasync_ctx->async_header.host_write_ptr = 0;
2352         pasync_ctx->async_header.ep_read_ptr = -1;
2353         pasync_ctx->async_data.host_write_ptr = 0;
2354         pasync_ctx->async_data.ep_read_ptr = -1;
2355 }
2356
2357 static int
2358 be_sgl_create_contiguous(void *virtual_address,
2359                          u64 physical_address, u32 length,
2360                          struct be_dma_mem *sgl)
2361 {
2362         WARN_ON(!virtual_address);
2363         WARN_ON(!physical_address);
2364         WARN_ON(length == 0);
2365         WARN_ON(!sgl);
2366
2367         sgl->va = virtual_address;
2368         sgl->dma = (unsigned long)physical_address;
2369         sgl->size = length;
2370
2371         return 0;
2372 }
2373
2374 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2375 {
2376         memset(sgl, 0, sizeof(*sgl));
2377 }
2378
2379 static void
2380 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2381                      struct mem_array *pmem, struct be_dma_mem *sgl)
2382 {
2383         if (sgl->va)
2384                 be_sgl_destroy_contiguous(sgl);
2385
2386         be_sgl_create_contiguous(pmem->virtual_address,
2387                                  pmem->bus_address.u.a64.address,
2388                                  pmem->size, sgl);
2389 }
2390
2391 static void
2392 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2393                            struct mem_array *pmem, struct be_dma_mem *sgl)
2394 {
2395         if (sgl->va)
2396                 be_sgl_destroy_contiguous(sgl);
2397
2398         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2399                                  pmem->bus_address.u.a64.address,
2400                                  pmem->size, sgl);
2401 }
2402
2403 static int be_fill_queue(struct be_queue_info *q,
2404                 u16 len, u16 entry_size, void *vaddress)
2405 {
2406         struct be_dma_mem *mem = &q->dma_mem;
2407
2408         memset(q, 0, sizeof(*q));
2409         q->len = len;
2410         q->entry_size = entry_size;
2411         mem->size = len * entry_size;
2412         mem->va = vaddress;
2413         if (!mem->va)
2414                 return -ENOMEM;
2415         memset(mem->va, 0, mem->size);
2416         return 0;
2417 }
2418
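/*
 * Allocate and create one event queue per CPU (plus one for the MCC when
 * MSI-X is enabled); on failure, free whatever EQ memory was allocated.
 */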
2419 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2420                              struct hwi_context_memory *phwi_context)
2421 {
2422         unsigned int i, num_eq_pages;
2423         int ret, eq_for_mcc;
2424         struct be_queue_info *eq;
2425         struct be_dma_mem *mem;
2426         void *eq_vaddress;
2427         dma_addr_t paddr;
2428
2429         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
2430                                       sizeof(struct be_eq_entry));
2431
2432         if (phba->msix_enabled)
2433                 eq_for_mcc = 1;
2434         else
2435                 eq_for_mcc = 0;
2436         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2437                 eq = &phwi_context->be_eq[i].q;
2438                 mem = &eq->dma_mem;
2439                 phwi_context->be_eq[i].phba = phba;
2440                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2441                                                      num_eq_pages * PAGE_SIZE,
2442                                                      &paddr);
                if (!eq_vaddress) {
                        ret = -ENOMEM;
                        goto create_eq_error;
                }
2445
2446                 mem->va = eq_vaddress;
2447                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2448                                     sizeof(struct be_eq_entry), eq_vaddress);
2449                 if (ret) {
2450                         shost_printk(KERN_ERR, phba->shost,
2451                                      "be_fill_queue Failed for EQ\n");
2452                         goto create_eq_error;
2453                 }
2454
2455                 mem->dma = paddr;
2456                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2457                                             phwi_context->cur_eqd);
2458                 if (ret) {
2459                         shost_printk(KERN_ERR, phba->shost,
2460                                      "beiscsi_cmd_eq_create "
2461                                      "Failed for EQ\n");
2462                         goto create_eq_error;
2463                 }
2464                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2465         }
2466         return 0;
2467 create_eq_error:
2468         for (i = 0; i < (phba->num_cpus + 1); i++) {
2469                 eq = &phwi_context->be_eq[i].q;
2470                 mem = &eq->dma_mem;
2471                 if (mem->va)
2472                         pci_free_consistent(phba->pcidev, num_eq_pages
2473                                             * PAGE_SIZE,
2474                                             mem->va, mem->dma);
2475         }
2476         return ret;
2477 }
2478
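/**
 * beiscsi_create_cqs - allocate and create the completion queues
 * @phba: driver priv structure for the adapter
 * @phwi_context: hwi context memory holding the CQ and EQ objects
 *
 * Creates one iSCSI CQ per CPU and binds each CQ to the EQ with the
 * same index. DMA memory for every CQ allocated so far is released if
 * any step fails.
 */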
2479 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2480                              struct hwi_context_memory *phwi_context)
2481 {
2482         unsigned int i, num_cq_pages;
2483         int ret;
2484         struct be_queue_info *cq, *eq;
2485         struct be_dma_mem *mem;
2486         struct be_eq_obj *pbe_eq;
2487         void *cq_vaddress;
2488         dma_addr_t paddr;
2489
2490         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
2491                                       sizeof(struct sol_cqe));
2492
2493         for (i = 0; i < phba->num_cpus; i++) {
2494                 cq = &phwi_context->be_cq[i];
2495                 eq = &phwi_context->be_eq[i].q;
2496                 pbe_eq = &phwi_context->be_eq[i];
2497                 pbe_eq->cq = cq;
2498                 pbe_eq->phba = phba;
2499                 mem = &cq->dma_mem;
2500                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2501                                                      num_cq_pages * PAGE_SIZE,
2502                                                      &paddr);
2503                 if (!cq_vaddress)
2504                         goto create_cq_error;
2505                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2506                                     sizeof(struct sol_cqe), cq_vaddress);
2507                 if (ret) {
2508                         shost_printk(KERN_ERR, phba->shost,
2509                                      "be_fill_queue Failed for ISCSI CQ\n");
2510                         goto create_cq_error;
2511                 }
2512
2513                 mem->dma = paddr;
2514                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2515                                             false, 0);
2516                 if (ret) {
2517                         shost_printk(KERN_ERR, phba->shost,
2518                                      "beiscsi_cmd_cq_create "
2519                                      "Failed for ISCSI CQ\n");
2520                         goto create_cq_error;
2521                 }
2522                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2523                                                  cq->id, eq->id);
2524                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2525         }
2526         return 0;
2527
2528 create_cq_error:
2529         for (i = 0; i < phba->num_cpus; i++) {
2530                 cq = &phwi_context->be_cq[i];
2531                 mem = &cq->dma_mem;
2532                 if (mem->va)
2533                         pci_free_consistent(phba->pcidev, num_cq_pages
2534                                             * PAGE_SIZE,
2535                                             mem->va, mem->dma);
2536         }
2537         return ret;
2538
2539 }
2540
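/**
 * beiscsi_create_def_hdr - create the default PDU header ring
 * @phba: driver priv structure for the adapter
 * @phwi_context: hwi context memory
 * @phwi_ctrlr: hwi controller tracking the ring ids
 * @def_pdu_ring_sz: size in bytes of the ring of physical addresses
 *
 * Builds the default PDU header queue on top of the preallocated
 * HWI_MEM_ASYNC_HEADER_RING memory, creates it in firmware against
 * CQ 0 and posts the initial async PDU buffers.
 */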
2541 static int
2542 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2543                        struct hwi_context_memory *phwi_context,
2544                        struct hwi_controller *phwi_ctrlr,
2545                        unsigned int def_pdu_ring_sz)
2546 {
2547         unsigned int idx;
2548         int ret;
2549         struct be_queue_info *dq, *cq;
2550         struct be_dma_mem *mem;
2551         struct be_mem_descriptor *mem_descr;
2552         void *dq_vaddress;
2553
2554         idx = 0;
2555         dq = &phwi_context->be_def_hdrq;
2556         cq = &phwi_context->be_cq[0];
2557         mem = &dq->dma_mem;
2558         mem_descr = phba->init_mem;
2559         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2560         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2561         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2562                             sizeof(struct phys_addr),
2563                             sizeof(struct phys_addr), dq_vaddress);
2564         if (ret) {
2565                 shost_printk(KERN_ERR, phba->shost,
2566                              "be_fill_queue Failed for DEF PDU HDR\n");
2567                 return ret;
2568         }
2569         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2570                                   bus_address.u.a64.address;
2571         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2572                                               def_pdu_ring_sz,
2573                                               phba->params.defpdu_hdr_sz);
2574         if (ret) {
2575                 shost_printk(KERN_ERR, phba->shost,
2576                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2577                 return ret;
2578         }
2579         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2580         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2581                  phwi_context->be_def_hdrq.id);
2582         hwi_post_async_buffers(phba, 1);
2583         return 0;
2584 }
2585
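/**
 * beiscsi_create_def_data - create the default PDU data ring
 * @phba: driver priv structure for the adapter
 * @phwi_context: hwi context memory
 * @phwi_ctrlr: hwi controller tracking the ring ids
 * @def_pdu_ring_sz: size in bytes of the ring of physical addresses
 *
 * Same as beiscsi_create_def_hdr() but for the default PDU data ring,
 * backed by the HWI_MEM_ASYNC_DATA_RING memory descriptor.
 */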
2586 static int
2587 beiscsi_create_def_data(struct beiscsi_hba *phba,
2588                         struct hwi_context_memory *phwi_context,
2589                         struct hwi_controller *phwi_ctrlr,
2590                         unsigned int def_pdu_ring_sz)
2591 {
2592         unsigned int idx;
2593         int ret;
2594         struct be_queue_info *dataq, *cq;
2595         struct be_dma_mem *mem;
2596         struct be_mem_descriptor *mem_descr;
2597         void *dq_vaddress;
2598
2599         idx = 0;
2600         dataq = &phwi_context->be_def_dataq;
2601         cq = &phwi_context->be_cq[0];
2602         mem = &dataq->dma_mem;
2603         mem_descr = phba->init_mem;
2604         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2605         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2606         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2607                             sizeof(struct phys_addr),
2608                             sizeof(struct phys_addr), dq_vaddress);
2609         if (ret) {
2610                 shost_printk(KERN_ERR, phba->shost,
2611                              "be_fill_queue Failed for DEF PDU DATA\n");
2612                 return ret;
2613         }
2614         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2615                                   bus_address.u.a64.address;
2616         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2617                                               def_pdu_ring_sz,
2618                                               phba->params.defpdu_data_sz);
2619         if (ret) {
2620                 shost_printk(KERN_ERR, phba->shost,
2621                              "be_cmd_create_default_pdu_queue Failed"
2622                              " for DEF PDU DATA\n");
2623                 return ret;
2624         }
2625         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2626         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2627                  phwi_context->be_def_dataq.id);
2628         hwi_post_async_buffers(phba, 0);
2629         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2630         return 0;
2631 }
2632
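/**
 * beiscsi_post_pages - post the SGE pages to the firmware
 * @phba: driver priv structure for the adapter
 *
 * Walks the HWI_MEM_SGE memory descriptor and posts every chunk to the
 * adapter with be_cmd_iscsi_post_sgl_pages(), starting at the page
 * offset that corresponds to the adapter's iscsi_icd_start.
 */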
2633 static int
2634 beiscsi_post_pages(struct beiscsi_hba *phba)
2635 {
2636         struct be_mem_descriptor *mem_descr;
2637         struct mem_array *pm_arr;
2638         unsigned int page_offset, i;
2639         struct be_dma_mem sgl;
2640         int status;
2641
2642         mem_descr = phba->init_mem;
2643         mem_descr += HWI_MEM_SGE;
2644         pm_arr = mem_descr->mem_array;
2645
2646         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2647                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2648         for (i = 0; i < mem_descr->num_elements; i++) {
2649                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2650                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2651                                                 page_offset,
2652                                                 (pm_arr->size / PAGE_SIZE));
2653                 page_offset += pm_arr->size / PAGE_SIZE;
2654                 if (status != 0) {
2655                         shost_printk(KERN_ERR, phba->shost,
2656                                      "post sgl failed.\n");
2657                         return status;
2658                 }
2659                 pm_arr++;
2660         }
2661         SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2662         return 0;
2663 }
2664
2665 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2666 {
2667         struct be_dma_mem *mem = &q->dma_mem;
2668         if (mem->va)
2669                 pci_free_consistent(phba->pcidev, mem->size,
2670                         mem->va, mem->dma);
2671 }
2672
2673 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2674                 u16 len, u16 entry_size)
2675 {
2676         struct be_dma_mem *mem = &q->dma_mem;
2677
2678         memset(q, 0, sizeof(*q));
2679         q->len = len;
2680         q->entry_size = entry_size;
2681         mem->size = len * entry_size;
2682         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2683         if (!mem->va)
2684                 return -ENOMEM;
2685         memset(mem->va, 0, mem->size);
2686         return 0;
2687 }
2688
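/**
 * beiscsi_create_wrb_rings - create a WRB ring per connection
 * @phba: driver priv structure for the adapter
 * @phwi_context: hwi context memory holding the WRB queue objects
 * @phwi_ctrlr: hwi controller tracking the cid of each ring
 *
 * Carves the HWI_MEM_WRB memory into cxns_per_ctrl rings of
 * wrbs_per_cxn WRBs each, moving on to the next mem_array element
 * whenever the current one is exhausted, and then creates each ring
 * in firmware with be_cmd_wrbq_create().
 */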
2689 static int
2690 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2691                          struct hwi_context_memory *phwi_context,
2692                          struct hwi_controller *phwi_ctrlr)
2693 {
2694         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2695         u64 pa_addr_lo;
2696         unsigned int idx, num, i;
2697         struct mem_array *pwrb_arr;
2698         void *wrb_vaddr;
2699         struct be_dma_mem sgl;
2700         struct be_mem_descriptor *mem_descr;
2701         int status;
2702
2703         idx = 0;
2704         mem_descr = phba->init_mem;
2705         mem_descr += HWI_MEM_WRB;
2706         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2707                            GFP_KERNEL);
2708         if (!pwrb_arr) {
2709                 shost_printk(KERN_ERR, phba->shost,
2710                              "Memory alloc failed in create wrb ring.\n");
2711                 return -ENOMEM;
2712         }
2713         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2714         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2715         num_wrb_rings = mem_descr->mem_array[idx].size /
2716                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2717
2718         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2719                 if (num_wrb_rings) {
2720                         pwrb_arr[num].virtual_address = wrb_vaddr;
2721                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2722                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2723                                             sizeof(struct iscsi_wrb);
2724                         wrb_vaddr += pwrb_arr[num].size;
2725                         pa_addr_lo += pwrb_arr[num].size;
2726                         num_wrb_rings--;
2727                 } else {
2728                         idx++;
2729                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2730                         pa_addr_lo = mem_descr->mem_array[idx].
2731                                         bus_address.u.a64.address;
2732                         num_wrb_rings = mem_descr->mem_array[idx].size /
2733                                         (phba->params.wrbs_per_cxn *
2734                                         sizeof(struct iscsi_wrb));
2735                         pwrb_arr[num].virtual_address = wrb_vaddr;
2736                         pwrb_arr[num].bus_address.u.a64.address =
2737                                                 pa_addr_lo;
2738                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2739                                                  sizeof(struct iscsi_wrb);
2740                         wrb_vaddr += pwrb_arr[num].size;
2741                         pa_addr_lo += pwrb_arr[num].size;
2742                         num_wrb_rings--;
2743                 }
2744         }
2745         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2746                 wrb_mem_index = 0;
2747                 offset = 0;
2748                 size = 0;
2749
2750                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2751                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2752                                             &phwi_context->be_wrbq[i]);
2753                 if (status != 0) {
2754                         shost_printk(KERN_ERR, phba->shost,
2755                                      "wrbq create failed.\n");
2756                         kfree(pwrb_arr);
2757                         return status;
2758                 }
2759                 phwi_ctrlr->wrb_context[i * 2].cid =
2760                                            phwi_context->be_wrbq[i].id;
2761         }
2762         kfree(pwrb_arr);
2763         return 0;
2764 }
2765
2766 static void free_wrb_handles(struct beiscsi_hba *phba)
2767 {
2768         unsigned int index;
2769         struct hwi_controller *phwi_ctrlr;
2770         struct hwi_wrb_context *pwrb_context;
2771
2772         phwi_ctrlr = phba->phwi_ctrlr;
2773         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2774                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2775                 kfree(pwrb_context->pwrb_handle_base);
2776                 kfree(pwrb_context->pwrb_handle_basestd);
2777         }
2778 }
2779
2780 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2781 {
2782         struct be_queue_info *q;
2783         struct be_ctrl_info *ctrl = &phba->ctrl;
2784
2785         q = &phba->ctrl.mcc_obj.q;
2786         if (q->created)
2787                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2788         be_queue_free(phba, q);
2789
2790         q = &phba->ctrl.mcc_obj.cq;
2791         if (q->created)
2792                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2793         be_queue_free(phba, q);
2794 }
2795
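/**
 * hwi_cleanup - destroy the queues created by hwi_init_port
 * @phba: driver priv structure for the adapter
 *
 * Tears down, in order, the per-connection WRB queues and their
 * handles, the default PDU header/data queues, the SGL pages, the
 * per-CPU CQs and EQs (including the MCC EQ when MSI-X is in use)
 * and finally the MCC queues.
 */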
2796 static void hwi_cleanup(struct beiscsi_hba *phba)
2797 {
2798         struct be_queue_info *q;
2799         struct be_ctrl_info *ctrl = &phba->ctrl;
2800         struct hwi_controller *phwi_ctrlr;
2801         struct hwi_context_memory *phwi_context;
2802         int i, eq_num;
2803
2804         phwi_ctrlr = phba->phwi_ctrlr;
2805         phwi_context = phwi_ctrlr->phwi_ctxt;
2806         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2807                 q = &phwi_context->be_wrbq[i];
2808                 if (q->created)
2809                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2810         }
2811         free_wrb_handles(phba);
2812
2813         q = &phwi_context->be_def_hdrq;
2814         if (q->created)
2815                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2816
2817         q = &phwi_context->be_def_dataq;
2818         if (q->created)
2819                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2820
2821         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2822
2823         for (i = 0; i < (phba->num_cpus); i++) {
2824                 q = &phwi_context->be_cq[i];
2825                 if (q->created)
2826                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2827         }
2828         if (phba->msix_enabled)
2829                 eq_num = 1;
2830         else
2831                 eq_num = 0;
2832         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2833                 q = &phwi_context->be_eq[i].q;
2834                 if (q->created)
2835                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2836         }
2837         be_mcc_queues_destroy(phba);
2838 }
2839
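/**
 * be_mcc_queues_create - create the MCC queue and its completion queue
 * @phba: driver priv structure for the adapter
 * @phwi_context: hwi context memory holding the EQ objects
 *
 * The MCC CQ is attached to the extra EQ when MSI-X is enabled and to
 * EQ 0 otherwise. Anything already created is undone on failure.
 */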
2840 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2841                                 struct hwi_context_memory *phwi_context)
2842 {
2843         struct be_queue_info *q, *cq;
2844         struct be_ctrl_info *ctrl = &phba->ctrl;
2845
2846         /* Alloc MCC compl queue */
2847         cq = &phba->ctrl.mcc_obj.cq;
2848         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2849                         sizeof(struct be_mcc_compl)))
2850                 goto err;
2851         /* Ask BE to create MCC compl queue; */
2852         if (phba->msix_enabled) {
2853                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2854                                          [phba->num_cpus].q, false, true, 0))
2855                         goto mcc_cq_free;
2856         } else {
2857                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2858                                           false, true, 0))
2859                         goto mcc_cq_free;
2860         }
2861
2862         /* Alloc MCC queue */
2863         q = &phba->ctrl.mcc_obj.q;
2864         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2865                 goto mcc_cq_destroy;
2866
2867         /* Ask BE to create MCC queue */
2868         if (beiscsi_cmd_mccq_create(phba, q, cq))
2869                 goto mcc_q_free;
2870
2871         return 0;
2872
2873 mcc_q_free:
2874         be_queue_free(phba, q);
2875 mcc_cq_destroy:
2876         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2877 mcc_cq_free:
2878         be_queue_free(phba, cq);
2879 err:
2880         return -ENOMEM;
2881 }
2882
2883 static int find_num_cpus(void)
2884 {
2885         int  num_cpus = 0;
2886
2887         num_cpus = num_online_cpus();
2888         if (num_cpus >= MAX_CPUS)
2889                 num_cpus = MAX_CPUS - 1;
2890
2891         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
2892         return num_cpus;
2893 }
2894
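/**
 * hwi_init_port - bring up the adapter queues
 * @phba: driver priv structure for the adapter
 *
 * Initializes the firmware and creates, in order, the EQs, MCC queues,
 * CQs, default PDU header/data rings and per-connection WRB rings,
 * posting the SGL pages in between. All queues are torn down again via
 * hwi_cleanup() if any step fails.
 */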
2895 static int hwi_init_port(struct beiscsi_hba *phba)
2896 {
2897         struct hwi_controller *phwi_ctrlr;
2898         struct hwi_context_memory *phwi_context;
2899         unsigned int def_pdu_ring_sz;
2900         struct be_ctrl_info *ctrl = &phba->ctrl;
2901         int status;
2902
2903         def_pdu_ring_sz =
2904                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2905         phwi_ctrlr = phba->phwi_ctrlr;
2906         phwi_context = phwi_ctrlr->phwi_ctxt;
2907         phwi_context->max_eqd = 0;
2908         phwi_context->min_eqd = 0;
2909         phwi_context->cur_eqd = 64;
2910         be_cmd_fw_initialize(&phba->ctrl);
2911
2912         status = beiscsi_create_eqs(phba, phwi_context);
2913         if (status != 0) {
2914                 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
2915                 goto error;
2916         }
2917
2918         status = be_mcc_queues_create(phba, phwi_context);
2919         if (status != 0)
2920                 goto error;
2921
2922         status = mgmt_check_supported_fw(ctrl, phba);
2923         if (status != 0) {
2924                 shost_printk(KERN_ERR, phba->shost,
2925                              "Unsupported fw version\n");
2926                 goto error;
2927         }
2928
2929         status = beiscsi_create_cqs(phba, phwi_context);
2930         if (status != 0) {
2931                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2932                 goto error;
2933         }
2934
2935         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2936                                         def_pdu_ring_sz);
2937         if (status != 0) {
2938                 shost_printk(KERN_ERR, phba->shost,
2939                              "Default Header not created\n");
2940                 goto error;
2941         }
2942
2943         status = beiscsi_create_def_data(phba, phwi_context,
2944                                          phwi_ctrlr, def_pdu_ring_sz);
2945         if (status != 0) {
2946                 shost_printk(KERN_ERR, phba->shost,
2947                              "Default Data not created\n");
2948                 goto error;
2949         }
2950
2951         status = beiscsi_post_pages(phba);
2952         if (status != 0) {
2953                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2954                 goto error;
2955         }
2956
2957         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2958         if (status != 0) {
2959                 shost_printk(KERN_ERR, phba->shost,
2960                              "WRB Rings not created\n");
2961                 goto error;
2962         }
2963
2964         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2965         return 0;
2966
2967 error:
2968         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2969         hwi_cleanup(phba);
2970         return -ENOMEM;
2971 }
2972
2973 static int hwi_init_controller(struct beiscsi_hba *phba)
2974 {
2975         struct hwi_controller *phwi_ctrlr;
2976
2977         phwi_ctrlr = phba->phwi_ctrlr;
2978         if (phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements == 1) {
2979                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2980                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2981                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
2982                          phwi_ctrlr->phwi_ctxt);
2983         } else {
2984                 shost_printk(KERN_ERR, phba->shost,
2985                              "HWI_MEM_ADDN_CONTEXT is more than one element. "
2986                              "Failing to load\n");
2987                 return -ENOMEM;
2988         }
2989
2990         iscsi_init_global_templates(phba);
2991         beiscsi_init_wrb_handle(phba);
2992         hwi_init_async_pdu_ctx(phba);
2993         if (hwi_init_port(phba) != 0) {
2994                 shost_printk(KERN_ERR, phba->shost,
2995                              "hwi_init_controller failed\n");
2996                 return -ENOMEM;
2997         }
2998         return 0;
2999 }
3000
3001 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3002 {
3003         struct be_mem_descriptor *mem_descr;
3004         int i, j;
3005
3006         mem_descr = phba->init_mem;
3007         i = 0;
3008         j = 0;
3009         for (i = 0; i < SE_MEM_MAX; i++) {
3010                 for (j = mem_descr->num_elements; j > 0; j--) {
3011                         pci_free_consistent(phba->pcidev,
3012                           mem_descr->mem_array[j - 1].size,
3013                           mem_descr->mem_array[j - 1].virtual_address,
3014                           (unsigned long)mem_descr->mem_array[j - 1].
3015                           bus_address.u.a64.address);
3016                 }
3017                 kfree(mem_descr->mem_array);
3018                 mem_descr++;
3019         }
3020         kfree(phba->init_mem);
3021         kfree(phba->phwi_ctrlr);
3022 }
3023
3024 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3025 {
3026         int ret = -ENOMEM;
3027
3028         ret = beiscsi_get_memory(phba);
3029         if (ret < 0) {
3030                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3031                              "Failed in beiscsi_get_memory\n");
3032                 return ret;
3033         }
3034
3035         ret = hwi_init_controller(phba);
3036         if (ret)
3037                 goto free_init;
3038         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
3039         return 0;
3040
3041 free_init:
3042         beiscsi_free_mem(phba);
3043         return -ENOMEM;
3044 }
3045
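/**
 * beiscsi_init_sgl_handle - set up the IO and eh SGL handle tables
 * @phba: driver priv structure for the adapter
 *
 * Splits the preallocated sgl_handle structures between regular IO
 * (ios_per_ctrl entries) and error-handling/management use, and wires
 * each handle to its iscsi_sge fragment and firmware sgl_index.
 */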
3046 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3047 {
3048         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3049         struct sgl_handle *psgl_handle;
3050         struct iscsi_sge *pfrag;
3051         unsigned int arr_index, i, idx;
3052
3053         phba->io_sgl_hndl_avbl = 0;
3054         phba->eh_sgl_hndl_avbl = 0;
3055
3056         mem_descr_sglh = phba->init_mem;
3057         mem_descr_sglh += HWI_MEM_SGLH;
3058         if (mem_descr_sglh->num_elements == 1) {
3059                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3060                                                  phba->params.ios_per_ctrl,
3061                                                  GFP_KERNEL);
3062                 if (!phba->io_sgl_hndl_base) {
3063                         shost_printk(KERN_ERR, phba->shost,
3064                                      "Mem Alloc Failed. Failing to load\n");
3065                         return -ENOMEM;
3066                 }
3067                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3068                                                  (phba->params.icds_per_ctrl -
3069                                                  phba->params.ios_per_ctrl),
3070                                                  GFP_KERNEL);
3071                 if (!phba->eh_sgl_hndl_base) {
3072                         kfree(phba->io_sgl_hndl_base);
3073                         shost_printk(KERN_ERR, phba->shost,
3074                                      "Mem Alloc Failed. Failing to load\n");
3075                         return -ENOMEM;
3076                 }
3077         } else {
3078                 shost_printk(KERN_ERR, phba->shost,
3079                              "HWI_MEM_SGLH is more than one element. "
3080                              "Failing to load\n");
3081                 return -ENOMEM;
3082         }
3083
3084         arr_index = 0;
3085         idx = 0;
3086         while (idx < mem_descr_sglh->num_elements) {
3087                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3088
3089                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3090                       sizeof(struct sgl_handle)); i++) {
3091                         if (arr_index < phba->params.ios_per_ctrl) {
3092                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3093                                 phba->io_sgl_hndl_avbl++;
3094                                 arr_index++;
3095                         } else {
3096                                 phba->eh_sgl_hndl_base[arr_index -
3097                                         phba->params.ios_per_ctrl] =
3098                                                                 psgl_handle;
3099                                 arr_index++;
3100                                 phba->eh_sgl_hndl_avbl++;
3101                         }
3102                         psgl_handle++;
3103                 }
3104                 idx++;
3105         }
3106         SE_DEBUG(DBG_LVL_8,
3107                  "phba->io_sgl_hndl_avbl=%d "
3108                  "phba->eh_sgl_hndl_avbl=%d\n",
3109                  phba->io_sgl_hndl_avbl,
3110                  phba->eh_sgl_hndl_avbl);
3111         mem_descr_sg = phba->init_mem;
3112         mem_descr_sg += HWI_MEM_SGE;
3113         SE_DEBUG(DBG_LVL_8, "mem_descr_sg->num_elements=%d\n",
3114                  mem_descr_sg->num_elements);
3115         arr_index = 0;
3116         idx = 0;
3117         while (idx < mem_descr_sg->num_elements) {
3118                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3119
3120                 for (i = 0;
3121                      i < (mem_descr_sg->mem_array[idx].size) /
3122                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3123                      i++) {
3124                         if (arr_index < phba->params.ios_per_ctrl)
3125                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3126                         else
3127                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3128                                                 phba->params.ios_per_ctrl];
3129                         psgl_handle->pfrag = pfrag;
3130                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3131                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3132                         pfrag += phba->params.num_sge_per_io;
3133                         psgl_handle->sgl_index =
3134                                 phba->fw_config.iscsi_icd_start + arr_index++;
3135                 }
3136                 idx++;
3137         }
3138         phba->io_sgl_free_index = 0;
3139         phba->io_sgl_alloc_index = 0;
3140         phba->eh_sgl_free_index = 0;
3141         phba->eh_sgl_alloc_index = 0;
3142         return 0;
3143 }
3144
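/**
 * hba_setup_cid_tbls - build the connection id and endpoint tables
 * @phba: driver priv structure for the adapter
 *
 * Allocates the cid_array and ep_array and seeds the cid_array with
 * every other cid starting from the firmware's iscsi_cid_start.
 */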
3145 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3146 {
3147         int i, new_cid;
3148
3149         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3150                                   GFP_KERNEL);
3151         if (!phba->cid_array) {
3152                 shost_printk(KERN_ERR, phba->shost,
3153                              "Failed to allocate memory in "
3154                              "hba_setup_cid_tbls\n");
3155                 return -ENOMEM;
3156         }
3157         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3158                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3159         if (!phba->ep_array) {
3160                 shost_printk(KERN_ERR, phba->shost,
3161                              "Failed to allocate memory in "
3162                              "hba_setup_cid_tbls\n");
3163                 kfree(phba->cid_array);
3164                 return -ENOMEM;
3165         }
3166         new_cid = phba->fw_config.iscsi_cid_start;
3167         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3168                 phba->cid_array[i] = new_cid;
3169                 new_cid += 2;
3170         }
3171         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3172         return 0;
3173 }
3174
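/**
 * hwi_enable_intr - enable host interrupts on the adapter
 * @phba: driver priv structure for the adapter
 *
 * Sets the hostintr bit in the MEMBAR interrupt control register if it
 * is not already set and rings the event queue doorbells.
 */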
3175 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3176 {
3177         struct be_ctrl_info *ctrl = &phba->ctrl;
3178         struct hwi_controller *phwi_ctrlr;
3179         struct hwi_context_memory *phwi_context;
3180         struct be_queue_info *eq;
3181         u8 __iomem *addr;
3182         u32 reg, i;
3183         u32 enabled;
3184
3185         phwi_ctrlr = phba->phwi_ctrlr;
3186         phwi_context = phwi_ctrlr->phwi_ctxt;
3187
3188         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3189                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3190         reg = ioread32(addr);
3191         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3192
3193         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3194         if (!enabled) {
3195                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3196                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3197                 iowrite32(reg, addr);
3198                 if (!phba->msix_enabled) {
3199                         eq = &phwi_context->be_eq[0].q;
3200                         SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3201                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3202                 } else {
3203                         for (i = 0; i <= phba->num_cpus; i++) {
3204                                 eq = &phwi_context->be_eq[i].q;
3205                                 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3206                                 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3207                         }
3208                 }
3209         }
3210         return true;
3211 }
3212
3213 static void hwi_disable_intr(struct beiscsi_hba *phba)
3214 {
3215         struct be_ctrl_info *ctrl = &phba->ctrl;
3216
3217         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3218         u32 reg = ioread32(addr);
3219
3220         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3221         if (enabled) {
3222                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3223                 iowrite32(reg, addr);
3224         } else
3225                 shost_printk(KERN_WARNING, phba->shost,
3226                              "In hwi_disable_intr, Already Disabled\n");
3227 }
3228
3229 static int beiscsi_init_port(struct beiscsi_hba *phba)
3230 {
3231         int ret;
3232
3233         ret = beiscsi_init_controller(phba);
3234         if (ret < 0) {
3235                 shost_printk(KERN_ERR, phba->shost,
3236                              "beiscsi_dev_probe - Failed in "
3237                              "beiscsi_init_controller\n");
3238                 return ret;
3239         }
3240         ret = beiscsi_init_sgl_handle(phba);
3241         if (ret < 0) {
3242                 shost_printk(KERN_ERR, phba->shost,
3243                              "beiscsi_dev_probe - Failed in "
3244                              "beiscsi_init_sgl_handle\n");
3245                 goto do_cleanup_ctrlr;
3246         }
3247
3248         if (hba_setup_cid_tbls(phba)) {
3249                 shost_printk(KERN_ERR, phba->shost,
3250                              "Failed in hba_setup_cid_tbls\n");
3251                 kfree(phba->io_sgl_hndl_base);
3252                 kfree(phba->eh_sgl_hndl_base);
3253                 goto do_cleanup_ctrlr;
3254         }
3255
3256         return ret;
3257
3258 do_cleanup_ctrlr:
3259         hwi_cleanup(phba);
3260         return ret;
3261 }
3262
3263 static void hwi_purge_eq(struct beiscsi_hba *phba)
3264 {
3265         struct hwi_controller *phwi_ctrlr;
3266         struct hwi_context_memory *phwi_context;
3267         struct be_queue_info *eq;
3268         struct be_eq_entry *eqe = NULL;
3269         int i, eq_msix;
3270         unsigned int num_processed;
3271
3272         phwi_ctrlr = phba->phwi_ctrlr;
3273         phwi_context = phwi_ctrlr->phwi_ctxt;
3274         if (phba->msix_enabled)
3275                 eq_msix = 1;
3276         else
3277                 eq_msix = 0;
3278
3279         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3280                 eq = &phwi_context->be_eq[i].q;
3281                 eqe = queue_tail_node(eq);
3282                 num_processed = 0;
3283                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3284                                         & EQE_VALID_MASK) {
3285                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3286                         queue_tail_inc(eq);
3287                         eqe = queue_tail_node(eq);
3288                         num_processed++;
3289                 }
3290
3291                 if (num_processed)
3292                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3293         }
3294 }
3295
3296 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3297 {
3298         unsigned char mgmt_status;
3299
3300         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3301         if (mgmt_status)
3302                 shost_printk(KERN_WARNING, phba->shost,
3303                              "mgmt_epfw_cleanup FAILED\n");
3304
3305         hwi_purge_eq(phba);
3306         hwi_cleanup(phba);
3307         kfree(phba->io_sgl_hndl_base);
3308         kfree(phba->eh_sgl_hndl_base);
3309         kfree(phba->cid_array);
3310         kfree(phba->ep_array);
3311 }
3312
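/**
 * beiscsi_offload_connection - program the connection offload WRB
 * @beiscsi_conn: driver connection being offloaded
 * @params: negotiated offload parameters
 *
 * Fills a target context update WRB with the negotiated iSCSI
 * parameters (burst lengths, erl, dde/hde, ir2t/imd, ExpStatSN) and
 * the pad buffer address, then posts it through the TXULP doorbell.
 */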
3313 void
3314 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3315                            struct beiscsi_offload_params *params)
3316 {
3317         struct wrb_handle *pwrb_handle;
3318         struct iscsi_target_context_update_wrb *pwrb = NULL;
3319         struct be_mem_descriptor *mem_descr;
3320         struct beiscsi_hba *phba = beiscsi_conn->phba;
3321         u32 doorbell = 0;
3322
3323         /*
3324          * We can always use 0 here because it is reserved by libiscsi for
3325          * login/startup related tasks.
3326          */
3327         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3328                                        phba->fw_config.iscsi_cid_start));
3329         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3330         memset(pwrb, 0, sizeof(*pwrb));
3331         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3332                       max_burst_length, pwrb, params->dw[offsetof
3333                       (struct amap_beiscsi_offload_params,
3334                       max_burst_length) / 32]);
3335         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3336                       max_send_data_segment_length, pwrb,
3337                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3338                       max_send_data_segment_length) / 32]);
3339         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3340                       first_burst_length,
3341                       pwrb,
3342                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3343                       first_burst_length) / 32]);
3344
3345         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3346                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3347                       erl) / 32] & OFFLD_PARAMS_ERL));
3348         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3349                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3350                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3351         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3352                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3353                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3354         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3355                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3356                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3357         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3358                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3359                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3360         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3361                       pwrb,
3362                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3363                       exp_statsn) / 32] + 1));
3364         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3365                       0x7);
3366         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3367                       pwrb, pwrb_handle->wrb_index);
3368         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3369                       pwrb, pwrb_handle->nxt_wrb_index);
3370         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3371                         session_state, pwrb, 0);
3372         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3373                       pwrb, 1);
3374         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3375                       pwrb, 0);
3376         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3377                       0);
3378
3379         mem_descr = phba->init_mem;
3380         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3381
3382         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3383                         pad_buffer_addr_hi, pwrb,
3384                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3385         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3386                         pad_buffer_addr_lo, pwrb,
3387                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3388
3389         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3390
3391         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3392         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3393                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3394         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3395
3396         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3397 }
3398
3399 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3400                               int *index, int *age)
3401 {
3402         *index = (int)itt;
3403         if (age)
3404                 *age = conn->session->age;
3405 }
3406
3407 /**
3408  * beiscsi_alloc_pdu - allocates pdu and related resources
3409  * @task: libiscsi task
3410  * @opcode: opcode of pdu for task
3411  *
3412  * This is called with the session lock held. It will allocate
3413  * the wrb and sgl if needed for the command. And it will prep
3414  * the pdu's itt. beiscsi_parse_pdu will later translate
3415  * the pdu itt to the libiscsi task itt.
3416  */
3417 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3418 {
3419         struct beiscsi_io_task *io_task = task->dd_data;
3420         struct iscsi_conn *conn = task->conn;
3421         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3422         struct beiscsi_hba *phba = beiscsi_conn->phba;
3423         struct hwi_wrb_context *pwrb_context;
3424         struct hwi_controller *phwi_ctrlr;
3425         itt_t itt;
3426         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3427         dma_addr_t paddr;
3428
3429         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3430                                           GFP_KERNEL, &paddr);
3431         if (!io_task->cmd_bhs)
3432                 return -ENOMEM;
3433         io_task->bhs_pa.u.a64.address = paddr;
3434         io_task->libiscsi_itt = (itt_t)task->itt;
3435         io_task->pwrb_handle = alloc_wrb_handle(phba,
3436                                                 beiscsi_conn->beiscsi_conn_cid -
3437                                                 phba->fw_config.iscsi_cid_start
3438                                                 );
3439         io_task->conn = beiscsi_conn;
3440
3441         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3442         task->hdr_max = sizeof(struct be_cmd_bhs);
3443
3444         if (task->sc) {
3445                 spin_lock(&phba->io_sgl_lock);
3446                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3447                 spin_unlock(&phba->io_sgl_lock);
3448                 if (!io_task->psgl_handle)
3449                         goto free_hndls;
3450         } else {
3451                 io_task->scsi_cmnd = NULL;
3452                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3453                         if (!beiscsi_conn->login_in_progress) {
3454                                 spin_lock(&phba->mgmt_sgl_lock);
3455                                 io_task->psgl_handle = (struct sgl_handle *)
3456                                                 alloc_mgmt_sgl_handle(phba);
3457                                 spin_unlock(&phba->mgmt_sgl_lock);
3458                                 if (!io_task->psgl_handle)
3459                                         goto free_hndls;
3460
3461                                 beiscsi_conn->login_in_progress = 1;
3462                                 beiscsi_conn->plogin_sgl_handle =
3463                                                         io_task->psgl_handle;
3464                         } else {
3465                                 io_task->psgl_handle =
3466                                                 beiscsi_conn->plogin_sgl_handle;
3467                         }
3468                 } else {
3469                         spin_lock(&phba->mgmt_sgl_lock);
3470                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3471                         spin_unlock(&phba->mgmt_sgl_lock);
3472                         if (!io_task->psgl_handle)
3473                                 goto free_hndls;
3474                 }
3475         }
3476         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3477                                  wrb_index << 16) | (unsigned int)
3478                                 (io_task->psgl_handle->sgl_index));
3479         io_task->pwrb_handle->pio_handle = task;
3480
3481         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3482         return 0;
3483
3484 free_hndls:
3485         phwi_ctrlr = phba->phwi_ctrlr;
3486         pwrb_context = &phwi_ctrlr->wrb_context[
3487                         beiscsi_conn->beiscsi_conn_cid -
3488                         phba->fw_config.iscsi_cid_start];
3489         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3490         io_task->pwrb_handle = NULL;
3491         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3492                       io_task->bhs_pa.u.a64.address);
3493         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3494         return -ENOMEM;
3495 }
3496
3497 static void beiscsi_cleanup_task(struct iscsi_task *task)
3498 {
3499         struct beiscsi_io_task *io_task = task->dd_data;
3500         struct iscsi_conn *conn = task->conn;
3501         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3502         struct beiscsi_hba *phba = beiscsi_conn->phba;
3503         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3504         struct hwi_wrb_context *pwrb_context;
3505         struct hwi_controller *phwi_ctrlr;
3506
3507         phwi_ctrlr = phba->phwi_ctrlr;
3508         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3509                         - phba->fw_config.iscsi_cid_start];
3510         if (io_task->pwrb_handle) {
3511                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3512                 io_task->pwrb_handle = NULL;
3513         }
3514
3515         if (io_task->cmd_bhs) {
3516                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3517                               io_task->bhs_pa.u.a64.address);
3518         }
3519
3520         if (task->sc) {
3521                 if (io_task->psgl_handle) {
3522                         spin_lock(&phba->io_sgl_lock);
3523                         free_io_sgl_handle(phba, io_task->psgl_handle);
3524                         spin_unlock(&phba->io_sgl_lock);
3525                         io_task->psgl_handle = NULL;
3526                 }
3527         } else {
3528                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3529                         return;
3530                 if (io_task->psgl_handle) {
3531                         spin_lock(&phba->mgmt_sgl_lock);
3532                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3533                         spin_unlock(&phba->mgmt_sgl_lock);
3534                         io_task->psgl_handle = NULL;
3535                 }
3536         }
3537 }
3538
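/**
 * beiscsi_iotask - post a SCSI command WRB
 * @task: libiscsi task carrying the SCSI command
 * @sg: scatterlist of the command data
 * @num_sg: number of mapped scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DATA-OUT) command
 *
 * Builds an INI_WR_CMD or INI_RD_CMD WRB, copies the LUN and CmdSN,
 * attaches the SGL via hwi_write_sgl() and rings the TXULP doorbell.
 */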
3539 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3540                           unsigned int num_sg, unsigned int xferlen,
3541                           unsigned int writedir)
3542 {
3543
3544         struct beiscsi_io_task *io_task = task->dd_data;
3545         struct iscsi_conn *conn = task->conn;
3546         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3547         struct beiscsi_hba *phba = beiscsi_conn->phba;
3548         struct iscsi_wrb *pwrb = NULL;
3549         unsigned int doorbell = 0;
3550
3551         pwrb = io_task->pwrb_handle->pwrb;
3552         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3553         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3554
3555         if (writedir) {
3556                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3557                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3558                               &io_task->cmd_bhs->iscsi_data_pdu,
3559                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3560                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3561                               &io_task->cmd_bhs->iscsi_data_pdu,
3562                               ISCSI_OPCODE_SCSI_DATA_OUT);
3563                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3564                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3565                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3566                               INI_WR_CMD);
3567                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3568         } else {
3569                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3570                               INI_RD_CMD);
3571                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3572         }
3573         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3574                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3575                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3576
3577         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3578                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3579                                   lun[0]));
3580         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3581         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3582                       io_task->pwrb_handle->wrb_index);
3583         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3584                       be32_to_cpu(task->cmdsn));
3585         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3586                       io_task->psgl_handle->sgl_index);
3587
3588         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3589
3590         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3591                       io_task->pwrb_handle->nxt_wrb_index);
3592         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3593
3594         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3595         doorbell |= (io_task->pwrb_handle->wrb_index &
3596                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3597         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3598
3599         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3600         return 0;
3601 }
3602
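/**
 * beiscsi_mtask - post a management task WRB
 * @task: libiscsi task with no SCSI command attached
 *
 * Handles login, nop-out, text, TMF and logout PDUs by selecting the
 * matching WRB type, attaching the task data with hwi_write_buffer()
 * and ringing the TXULP doorbell. Unknown opcodes return -EINVAL.
 */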
3603 static int beiscsi_mtask(struct iscsi_task *task)
3604 {
3605         struct beiscsi_io_task *io_task = task->dd_data;
3606         struct iscsi_conn *conn = task->conn;
3607         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3608         struct beiscsi_hba *phba = beiscsi_conn->phba;
3609         struct iscsi_wrb *pwrb = NULL;
3610         unsigned int doorbell = 0;
3611         unsigned int cid;
3612
3613         cid = beiscsi_conn->beiscsi_conn_cid;
3614         pwrb = io_task->pwrb_handle->pwrb;
3615         memset(pwrb, 0, sizeof(*pwrb));
3616         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3617                       be32_to_cpu(task->cmdsn));
3618         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3619                       io_task->pwrb_handle->wrb_index);
3620         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3621                       io_task->psgl_handle->sgl_index);
3622
3623         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3624         case ISCSI_OP_LOGIN:
3625                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3626                               TGT_DM_CMD);
3627                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3628                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3629                 hwi_write_buffer(pwrb, task);
3630                 break;
3631         case ISCSI_OP_NOOP_OUT:
3632                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3633                               INI_RD_CMD);
3634                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3635                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3636                 else
3637                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3638                 hwi_write_buffer(pwrb, task);
3639                 break;
3640         case ISCSI_OP_TEXT:
3641                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3642                               TGT_DM_CMD);
3643                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3644                 hwi_write_buffer(pwrb, task);
3645                 break;
3646         case ISCSI_OP_SCSI_TMFUNC:
3647                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3648                               INI_TMF_CMD);
3649                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3650                 hwi_write_buffer(pwrb, task);
3651                 break;
3652         case ISCSI_OP_LOGOUT:
3653                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3654                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3655                               HWH_TYPE_LOGOUT);
3656                 hwi_write_buffer(pwrb, task);
3657                 break;
3658
3659         default:
3660                 SE_DEBUG(DBG_LVL_1, "opcode = %d not supported\n",
3661                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3662                 return -EINVAL;
3663         }
3664
3665         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3666                       task->data_count);
3667         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3668                       io_task->pwrb_handle->nxt_wrb_index);
3669         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3670
3671         doorbell |= cid & DB_WRB_POST_CID_MASK;
3672         doorbell |= (io_task->pwrb_handle->wrb_index &
3673                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3674         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3675         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3676         return 0;
3677 }
3678
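/**
 * beiscsi_task_xmit - libiscsi xmit_task entry point
 * @task: libiscsi task to transmit
 *
 * Management tasks are routed to beiscsi_mtask(). SCSI commands are
 * DMA mapped and handed to beiscsi_iotask() together with the
 * scatterlist, transfer length and data direction.
 */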
3679 static int beiscsi_task_xmit(struct iscsi_task *task)
3680 {
3681         struct beiscsi_io_task *io_task = task->dd_data;
3682         struct scsi_cmnd *sc = task->sc;
3683         struct scatterlist *sg;
3684         int num_sg;
3685         unsigned int  writedir = 0, xferlen = 0;
3686
3687         if (!sc)
3688                 return beiscsi_mtask(task);
3689
3690         io_task->scsi_cmnd = sc;
3691         num_sg = scsi_dma_map(sc);
3692         if (num_sg < 0) {
3693                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3694                 return num_sg;
3695         }
3696         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3697                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3698         xferlen = scsi_bufflen(sc);
3699         sg = scsi_sglist(sc);
3700         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3701                 writedir = 1;
3702                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
3703                          task->imm_count);
3704         } else
3705                 writedir = 0;
3706         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3707 }
3708
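/**
 * beiscsi_remove - PCI remove handler
 * @pcidev: PCI device being removed
 *
 * Disables interrupts, frees the (MSI-X) IRQs, stops iopoll, cleans up
 * the port and its memory, releases the mailbox DMA memory and removes
 * the iSCSI host.
 */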
3709 static void beiscsi_remove(struct pci_dev *pcidev)
3710 {
3711         struct beiscsi_hba *phba = NULL;
3712         struct hwi_controller *phwi_ctrlr;
3713         struct hwi_context_memory *phwi_context;
3714         struct be_eq_obj *pbe_eq;
3715         unsigned int i, msix_vec;
3716
3717         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3718         if (!phba) {
3719                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
3720                 return;
3721         }
3722
3723         phwi_ctrlr = phba->phwi_ctrlr;
3724         phwi_context = phwi_ctrlr->phwi_ctxt;
3725         hwi_disable_intr(phba);
3726         if (phba->msix_enabled) {
3727                 for (i = 0; i <= phba->num_cpus; i++) {
3728                         msix_vec = phba->msix_entries[i].vector;
3729                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3730                 }
3731         } else
3732                 if (phba->pcidev->irq)
3733                         free_irq(phba->pcidev->irq, phba);
3734         pci_disable_msix(phba->pcidev);
3735         destroy_workqueue(phba->wq);
3736         if (blk_iopoll_enabled)
3737                 for (i = 0; i < phba->num_cpus; i++) {
3738                         pbe_eq = &phwi_context->be_eq[i];
3739                         blk_iopoll_disable(&pbe_eq->iopoll);
3740                 }
3741
3742         beiscsi_clean_port(phba);
3743         beiscsi_free_mem(phba);
3744         beiscsi_unmap_pci_function(phba);
3745         pci_free_consistent(phba->pcidev,
3746                             phba->ctrl.mbox_mem_alloced.size,
3747                             phba->ctrl.mbox_mem_alloced.va,
3748                             phba->ctrl.mbox_mem_alloced.dma);
3749         iscsi_host_remove(phba->shost);
3750         pci_dev_put(phba->pcidev);
3751         iscsi_host_free(phba->shost);
3752 }
3753
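     /**
      * beiscsi_msix_enable - attempt to enable MSI-X interrupts
      * @phba: driver instance
      *
      * Requests num_cpus + 1 MSI-X vectors and sets msix_enabled on
      * success; if the request fails the driver falls back to the
      * legacy pin-based interrupt.
      */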
3754 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3755 {
3756         int i, status;
3757
3758         for (i = 0; i <= phba->num_cpus; i++)
3759                 phba->msix_entries[i].entry = i;
3760
3761         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3762                                  (phba->num_cpus + 1));
3763         if (!status)
3764                 phba->msix_enabled = true;
3767 }
3768
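     /**
      * beiscsi_dev_probe - PCI probe callback
      * @pcidev: PCI device being probed
      * @id: matching entry from beiscsi_pci_id_table
      *
      * Enables the PCI device, allocates the host structure, derives the
      * ASIC generation from the device ID, optionally enables MSI-X, and
      * initialises the controller, firmware configuration, port resources,
      * MCC tag pool, work queue, blk_iopoll contexts and IRQs before
      * enabling adapter interrupts.  Each error path unwinds only the
      * steps that completed before the failure.
      */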
3769 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3770                                 const struct pci_device_id *id)
3771 {
3772         struct beiscsi_hba *phba = NULL;
3773         struct hwi_controller *phwi_ctrlr;
3774         struct hwi_context_memory *phwi_context;
3775         struct be_eq_obj *pbe_eq;
3776         int ret, msix_vec, num_cpus, i;
3777
3778         ret = beiscsi_enable_pci(pcidev);
3779         if (ret < 0) {
3780                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3781                         " Failed to enable pci device\n");
3782                 return ret;
3783         }
3784
3785         phba = beiscsi_hba_alloc(pcidev);
3786         if (!phba) {
3787                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3788                         " Failed in beiscsi_hba_alloc\n");
3789                 goto disable_pci;
3790         }
3791
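             /* Derive the ASIC generation from the PCI device ID. */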
3792         switch (pcidev->device) {
3793         case BE_DEVICE_ID1:
3794         case OC_DEVICE_ID1:
3795         case OC_DEVICE_ID2:
3796                 phba->generation = BE_GEN2;
3797                 break;
3798         case BE_DEVICE_ID2:
3799         case OC_DEVICE_ID3:
3800                 phba->generation = BE_GEN3;
3801                 break;
3802         default:
3803                 phba->generation = 0;
3804         }
3805
3806         if (enable_msix)
3807                 num_cpus = find_num_cpus();
3808         else
3809                 num_cpus = 1;
3810         phba->num_cpus = num_cpus;
3811         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3812
3813         if (enable_msix)
3814                 beiscsi_msix_enable(phba);
3815         ret = be_ctrl_init(phba, pcidev);
3816         if (ret) {
3817                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3818                                 "Failed in be_ctrl_init\n");
3819                 goto hba_free;
3820         }
3821
3822         spin_lock_init(&phba->io_sgl_lock);
3823         spin_lock_init(&phba->mgmt_sgl_lock);
3824         spin_lock_init(&phba->isr_lock);
3825         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3826         if (ret != 0) {
3827                 shost_printk(KERN_ERR, phba->shost,
3828                              "Error getting fw config\n");
3829                 goto free_port;
3830         }
3831         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3832         beiscsi_get_params(phba);
3833         phba->shost->can_queue = phba->params.ios_per_ctrl;
3834         ret = beiscsi_init_port(phba);
3835         if (ret < 0) {
3836                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3837                              "Failed in beiscsi_init_port\n");
3838                 goto free_port;
3839         }
3840
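             /* Set up the MCC tag pool and the per-tag wait queues. */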
3841         for (i = 0; i < MAX_MCC_CMD; i++) {
3842                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3843                 phba->ctrl.mcc_tag[i] = i + 1;
3844                 phba->ctrl.mcc_numtag[i + 1] = 0;
3845                 phba->ctrl.mcc_tag_available++;
3846         }
3847
3848         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3849
3850         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3851                  phba->shost->host_no);
3852         phba->wq = create_workqueue(phba->wq_name);
3853         if (!phba->wq) {
3854                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3855                                 "Failed to allocate work queue\n");
3856                 goto free_twq;
3857         }
3858
3859         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3860
3861         phwi_ctrlr = phba->phwi_ctrlr;
3862         phwi_context = phwi_ctrlr->phwi_ctxt;
3863         if (blk_iopoll_enabled) {
3864                 for (i = 0; i < phba->num_cpus; i++) {
3865                         pbe_eq = &phwi_context->be_eq[i];
3866                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3867                                         be_iopoll);
3868                         blk_iopoll_enable(&pbe_eq->iopoll);
3869                 }
3870         }
3871         ret = beiscsi_init_irqs(phba);
3872         if (ret < 0) {
3873                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3874                              "Failed in beiscsi_init_irqs\n");
3875                 goto free_blkenbld;
3876         }
3877         ret = hwi_enable_intr(phba);
3878         if (ret < 0) {
3879                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3880                              "Failed in hwi_enable_intr\n");
3881                 goto free_ctrlr;
3882         }
3883         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
3884         return 0;
3885
3886 free_ctrlr:
3887         if (phba->msix_enabled) {
3888                 for (i = 0; i <= phba->num_cpus; i++) {
3889                         msix_vec = phba->msix_entries[i].vector;
3890                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3891                 }
3892         } else
3893                 if (phba->pcidev->irq)
3894                         free_irq(phba->pcidev->irq, phba);
3895         pci_disable_msix(phba->pcidev);
3896 free_blkenbld:
3897         destroy_workqueue(phba->wq);
3898         if (blk_iopoll_enabled)
3899                 for (i = 0; i < phba->num_cpus; i++) {
3900                         pbe_eq = &phwi_context->be_eq[i];
3901                         blk_iopoll_disable(&pbe_eq->iopoll);
3902                 }
3903 free_twq:
3904         beiscsi_clean_port(phba);
3905         beiscsi_free_mem(phba);
3906 free_port:
3907         pci_free_consistent(phba->pcidev,
3908                             phba->ctrl.mbox_mem_alloced.size,
3909                             phba->ctrl.mbox_mem_alloced.va,
3910                             phba->ctrl.mbox_mem_alloced.dma);
3911         beiscsi_unmap_pci_function(phba);
3912 hba_free:
3913         iscsi_host_remove(phba->shost);
3914         pci_dev_put(phba->pcidev);
3915         iscsi_host_free(phba->shost);
3916 disable_pci:
3917         pci_disable_device(pcidev);
3918         return ret;
3919 }
3920
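     /*
      * iSCSI transport template registered at module load: advertises
      * data-path offload and maps the transport callbacks onto the
      * be2iscsi handlers above and the generic libiscsi helpers.
      */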
3921 struct iscsi_transport beiscsi_iscsi_transport = {
3922         .owner = THIS_MODULE,
3923         .name = DRV_NAME,
3924         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3925                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3926         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3927                 ISCSI_MAX_XMIT_DLENGTH |
3928                 ISCSI_HDRDGST_EN |
3929                 ISCSI_DATADGST_EN |
3930                 ISCSI_INITIAL_R2T_EN |
3931                 ISCSI_MAX_R2T |
3932                 ISCSI_IMM_DATA_EN |
3933                 ISCSI_FIRST_BURST |
3934                 ISCSI_MAX_BURST |
3935                 ISCSI_PDU_INORDER_EN |
3936                 ISCSI_DATASEQ_INORDER_EN |
3937                 ISCSI_ERL |
3938                 ISCSI_CONN_PORT |
3939                 ISCSI_CONN_ADDRESS |
3940                 ISCSI_EXP_STATSN |
3941                 ISCSI_PERSISTENT_PORT |
3942                 ISCSI_PERSISTENT_ADDRESS |
3943                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3944                 ISCSI_USERNAME | ISCSI_PASSWORD |
3945                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3946                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3947                 ISCSI_LU_RESET_TMO |
3948                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3949                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3950         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3951                                 ISCSI_HOST_INITIATOR_NAME,
3952         .create_session = beiscsi_session_create,
3953         .destroy_session = beiscsi_session_destroy,
3954         .create_conn = beiscsi_conn_create,
3955         .bind_conn = beiscsi_conn_bind,
3956         .destroy_conn = iscsi_conn_teardown,
3957         .set_param = beiscsi_set_param,
3958         .get_conn_param = beiscsi_conn_get_param,
3959         .get_session_param = iscsi_session_get_param,
3960         .get_host_param = beiscsi_get_host_param,
3961         .start_conn = beiscsi_conn_start,
3962         .stop_conn = iscsi_conn_stop,
3963         .send_pdu = iscsi_conn_send_pdu,
3964         .xmit_task = beiscsi_task_xmit,
3965         .cleanup_task = beiscsi_cleanup_task,
3966         .alloc_pdu = beiscsi_alloc_pdu,
3967         .parse_pdu_itt = beiscsi_parse_pdu,
3968         .get_stats = beiscsi_conn_get_stats,
3969         .ep_connect = beiscsi_ep_connect,
3970         .ep_poll = beiscsi_ep_poll,
3971         .ep_disconnect = beiscsi_ep_disconnect,
3972         .session_recovery_timedout = iscsi_session_recovery_timedout,
3973 };
3974
3975 static struct pci_driver beiscsi_pci_driver = {
3976         .name = DRV_NAME,
3977         .probe = beiscsi_dev_probe,
3978         .remove = beiscsi_remove,
3979         .id_table = beiscsi_pci_id_table
3980 };
3982
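     /**
      * beiscsi_module_init - module entry point
      *
      * Registers the iSCSI transport template and then the PCI driver;
      * the transport is unregistered again if PCI registration fails.
      */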
3983 static int __init beiscsi_module_init(void)
3984 {
3985         int ret;
3986
3987         beiscsi_scsi_transport =
3988                         iscsi_register_transport(&beiscsi_iscsi_transport);
3989         if (!beiscsi_scsi_transport) {
3990                 SE_DEBUG(DBG_LVL_1,
3991                          "beiscsi_module_init - Unable to register beiscsi "
3992                          "transport.\n");
3993                 return -ENOMEM;
3994         }
3995         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
3996                  &beiscsi_iscsi_transport);
3997
3998         ret = pci_register_driver(&beiscsi_pci_driver);
3999         if (ret) {
4000                 SE_DEBUG(DBG_LVL_1,
4001                          "beiscsi_module_init - Unable to register "
4002                          "beiscsi pci driver.\n");
4003                 goto unregister_iscsi_transport;
4004         }
4005         return 0;
4006
4007 unregister_iscsi_transport:
4008         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4009         return ret;
4010 }
4011
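     /**
      * beiscsi_module_exit - module exit point
      *
      * Unregisters the PCI driver and then the iSCSI transport template.
      */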
4012 static void __exit beiscsi_module_exit(void)
4013 {
4014         pci_unregister_driver(&beiscsi_pci_driver);
4015         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4016 }
4017
4018 module_init(beiscsi_module_init);
4019 module_exit(beiscsi_module_exit);