/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
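/*
 * Slot tag helpers: mvi->tags is a bitmap with one bit per command slot.
 * mvs_tag_alloc() hands out the lowest free slot number, mvs_tag_set()
 * and mvs_tag_clear() mark a slot busy or free, and mvs_tag_init()
 * clears the whole map at start-up.
 */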
static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = &mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = &mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = &mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}
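/*
 * Debug helpers: mvs_hexdump() prints a buffer as hex plus printable
 * ASCII, and the mvs_hba_*_dump() routines below dump a slot's command
 * table, open address frame, PRD table and status buffer, as well as
 * the delivery and completion queue entries around it.
 */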
77 void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
85 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
91 for (i = 0; i < 16; i++) {
93 printk(KERN_DEBUG"%02X ", (u32)data[i]);
95 printk(KERN_DEBUG" ");
97 printk(KERN_DEBUG": ");
98 for (i = 0; i < run; i++)
99 printk(KERN_DEBUG"%c",
100 isalnum(data[i]) ? data[i] : '.');
101 printk(KERN_DEBUG"\n");
105 printk(KERN_DEBUG"\n");
109 static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
110 enum sas_protocol proto)
113 struct mvs_slot_info *slot = &mvi->slot_info[tag];
115 offset = slot->cmd_size + MVS_OAF_SZ +
116 MVS_CHIP_DISP->prd_size() * slot->n_elem;
117 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
119 mvs_hexdump(32, (u8 *) slot->response,
120 (u32) slot->buf_dma + offset);
124 static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
125 enum sas_protocol proto)
130 struct mvs_slot_info *slot = &mvi->slot_info[tag];
133 sz = MVS_CHIP_SLOT_SZ;
136 dev_printk(KERN_DEBUG, mvi->dev,
137 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
138 dev_printk(KERN_DEBUG, mvi->dev,
139 "Delivery Queue Base Address=0x%llX (PA)"
140 "(tx_dma=0x%llX), Entry=%04d\n",
141 addr, (unsigned long long)mvi->tx_dma, w_ptr);
142 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
143 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
145 addr = mvi->slot_dma;
146 dev_printk(KERN_DEBUG, mvi->dev,
147 "Command List Base Address=0x%llX (PA)"
148 "(slot_dma=0x%llX), Header=%03d\n",
149 addr, (unsigned long long)slot->buf_dma, tag);
150 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
152 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
153 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
154 /*1.command table area */
155 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
156 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
157 /*2.open address frame area */
158 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
159 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
160 (u32) slot->buf_dma + slot->cmd_size);
162 mvs_hba_sb_dump(mvi, tag, proto);
164 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
165 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
166 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
167 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
171 static void mvs_hba_cq_dump(struct mvs_info *mvi)
175 void __iomem *regs = mvi->regs;
176 u32 entry = mvi->rx_cons + 1;
177 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
179 /*Completion Queue */
180 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
181 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
182 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
183 dev_printk(KERN_DEBUG, mvi->dev,
184 "Completion List Base Address=0x%llX (PA), "
185 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
186 addr, entry - 1, mvi->rx[0]);
187 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
188 mvi->rx_dma + sizeof(u32) * entry);
192 void mvs_get_sas_addr(void *buf, u32 buflen)
194 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
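/*
 * Map a libsas domain_device back to the mvs_info instance that owns it:
 * walk sha->sas_port[] to find the device's port, take the first phy on
 * that port, and divide the global phy index by the per-host phy count
 * to pick the right controller.
 */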
197 struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
199 unsigned long i = 0, j = 0, hi = 0;
200 struct sas_ha_struct *sha = dev->port->ha;
201 struct mvs_info *mvi = NULL;
202 struct asd_sas_phy *phy;
204 while (sha->sas_port[i]) {
205 if (sha->sas_port[i] == dev->port) {
206 phy = container_of(sha->sas_port[i]->phy_list.next,
207 struct asd_sas_phy, port_phy_el);
209 while (sha->sas_phy[j]) {
210 if (sha->sas_phy[j] == phy)
218 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
219 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
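/*
 * Collect the controller-local phy numbers that make up the device's
 * (possibly wide) port; global phy indexes are reduced to per-controller
 * numbering before they are stored in phyno[].
 */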
226 int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
228 unsigned long i = 0, j = 0, n = 0, num = 0;
229 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
230 struct mvs_info *mvi = mvi_dev->mvi_info;
231 struct sas_ha_struct *sha = dev->port->ha;
233 while (sha->sas_port[i]) {
234 if (sha->sas_port[i] == dev->port) {
235 struct asd_sas_phy *phy;
236 list_for_each_entry(phy,
237 &sha->sas_port[i]->phy_list, port_phy_el) {
239 while (sha->sas_phy[j]) {
240 if (sha->sas_phy[j] == phy)
244 phyno[n] = (j >= mvi->chip->n_phy) ?
245 (j - mvi->chip->n_phy) : j;
256 struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
260 for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
261 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
264 if (mvi->devices[dev_no].taskfileset == reg_set)
265 return &mvi->devices[dev_no];
static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}
282 static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
283 struct mvs_device *dev)
285 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
287 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
290 void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
293 for_each_phy(phy_mask, phy_mask, no) {
296 MVS_CHIP_DISP->phy_reset(mvi, no, hard);
300 /* FIXME: locking? */
301 int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
304 int rc = 0, phy_id = sas_phy->id;
306 struct sas_ha_struct *sha = sas_phy->ha;
307 struct mvs_info *mvi = NULL;
309 while (sha->sas_phy[i]) {
310 if (sha->sas_phy[i] == sas_phy)
314 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
315 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
318 case PHY_FUNC_SET_LINK_RATE:
319 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
322 case PHY_FUNC_HARD_RESET:
323 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
324 if (tmp & PHY_RST_HARD)
326 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
329 case PHY_FUNC_LINK_RESET:
330 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
331 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
334 case PHY_FUNC_DISABLE:
335 MVS_CHIP_DISP->phy_disable(mvi, phy_id);
337 case PHY_FUNC_RELEASE_SPINUP_HOLD:
345 void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
346 u32 off_lo, u32 off_hi, u64 sas_addr)
348 u32 lo = (u32)sas_addr;
349 u32 hi = (u32)(sas_addr>>32);
351 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
352 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
353 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
354 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
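/*
 * Hand the frame received during OOB/identify (SAS identify frame or
 * SATA signature FIS) up to libsas: update the sas_phy link rates,
 * signal PHYE_OOB_DONE and report the bytes-dmaed port event.
 */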
357 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
359 struct mvs_phy *phy = &mvi->phy[i];
360 struct asd_sas_phy *sas_phy = &phy->sas_phy;
361 struct sas_ha_struct *sas_ha;
362 if (!phy->phy_attached)
365 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
366 && phy->phy_type & PORT_TYPE_SAS) {
371 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
374 struct sas_phy *sphy = sas_phy->phy;
376 sphy->negotiated_linkrate = sas_phy->linkrate;
377 sphy->minimum_linkrate = phy->minimum_linkrate;
378 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
379 sphy->maximum_linkrate = phy->maximum_linkrate;
380 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
383 if (phy->phy_type & PORT_TYPE_SAS) {
384 struct sas_identify_frame *id;
386 id = (struct sas_identify_frame *)phy->frame_rcvd;
387 id->dev_type = phy->identify.device_type;
388 id->initiator_bits = SAS_PROTOCOL_ALL;
389 id->target_bits = phy->identify.target_port_protocols;
390 } else if (phy->phy_type & PORT_TYPE_SATA) {
393 mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);
395 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
397 mvi->sas->notify_port_event(sas_phy,
401 int mvs_slave_alloc(struct scsi_device *scsi_dev)
403 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
404 if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if the REPORT_LUNS request fails
		 */
		if (scsi_dev->lun > 0)
410 scsi_dev->tagged_supported = 1;
413 return sas_slave_alloc(scsi_dev);
416 int mvs_slave_configure(struct scsi_device *sdev)
418 struct domain_device *dev = sdev_to_domain_dev(sdev);
419 int ret = sas_slave_configure(sdev);
423 if (dev_is_sata(dev)) {
424 /* may set PIO mode */
426 struct ata_port *ap = dev->sata_dev.ap;
427 struct ata_device *adev = ap->link.device;
428 adev->flags |= ATA_DFLAG_NCQ_OFF;
429 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
435 void mvs_scan_start(struct Scsi_Host *shost)
438 unsigned short core_nr;
439 struct mvs_info *mvi;
440 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
442 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
444 for (j = 0; j < core_nr; j++) {
445 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
446 for (i = 0; i < mvi->chip->n_phy; ++i)
447 mvs_bytes_dmaed(mvi, i);
451 int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
453 /* give the phy enabling interrupt event time to come in (1s
454 * is empirically about all it takes) */
457 /* Wait for discovery to finish */
458 scsi_flush_work(shost);
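/*
 * The mvs_task_prep_*() helpers below all carve the slot's DMA buffer
 * into the same four regions (command table, open address frame, PRD
 * scatter/gather table, status buffer) and then fill in the command
 * header and the TX delivery-queue entry for the slot.
 */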
462 static int mvs_task_prep_smp(struct mvs_info *mvi,
463 struct mvs_task_exec_info *tei)
466 struct sas_task *task = tei->task;
467 struct mvs_cmd_hdr *hdr = tei->hdr;
468 struct domain_device *dev = task->dev;
469 struct asd_sas_port *sas_port = dev->port;
470 struct scatterlist *sg_req, *sg_resp;
471 u32 req_len, resp_len, tag = tei->tag;
474 dma_addr_t buf_tmp_dma;
476 struct mvs_slot_info *slot = &mvi->slot_info[tag];
477 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
483 * DMA-map SMP request, response buffers
485 sg_req = &task->smp_task.smp_req;
486 elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
489 req_len = sg_dma_len(sg_req);
491 sg_resp = &task->smp_task.smp_resp;
492 elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
497 resp_len = SB_RFB_MAX;
499 /* must be in dwords */
500 if ((req_len & 0x3) || (resp_len & 0x3)) {
506 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
509 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
511 buf_tmp_dma = slot->buf_dma;
515 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
517 buf_tmp_dma += req_len;
518 slot->cmd_size = req_len;
520 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
523 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
525 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
527 buf_tmp += MVS_OAF_SZ;
528 buf_tmp_dma += MVS_OAF_SZ;
530 /* region 3: PRD table *********************************** */
533 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
537 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
542 slot->response = buf_tmp;
543 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
544 if (mvi->flags & MVF_FLAG_SOC)
545 hdr->reserved[0] = 0;
548 * Fill in TX ring and command slot header
550 slot->tx = mvi->tx_prod;
551 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
553 (sas_port->phy_mask << TXQ_PHY_SHIFT));
556 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
557 hdr->tags = cpu_to_le32(tag);
560 /* generate open address frame hdr (first 12 bytes) */
561 /* initiator, SMP, ftype 1h */
562 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
563 buf_oaf[1] = dev->linkrate & 0xf;
564 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
565 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
567 /* fill in PRD (scatter/gather) table, if any */
568 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
572 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
573 memcpy(buf_cmd, from + sg_req->offset, req_len);
574 kunmap_atomic(from, KM_IRQ0);
579 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
582 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
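/*
 * For NCQ (FPDMA READ/WRITE) commands, fetch the hardware queue tag
 * from the ata_queued_cmd attached to the task so it can be placed in
 * the command header and the FIS sector count field.
 */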
587 static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
589 struct ata_queued_cmd *qc = task->uldd_task;
592 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
593 qc->tf.command == ATA_CMD_FPDMA_READ) {
602 static int mvs_task_prep_ata(struct mvs_info *mvi,
603 struct mvs_task_exec_info *tei)
605 struct sas_task *task = tei->task;
606 struct domain_device *dev = task->dev;
607 struct mvs_device *mvi_dev = dev->lldd_dev;
608 struct mvs_cmd_hdr *hdr = tei->hdr;
609 struct asd_sas_port *sas_port = dev->port;
610 struct mvs_slot_info *slot;
612 u32 tag = tei->tag, hdr_tag;
615 u8 *buf_cmd, *buf_oaf;
616 dma_addr_t buf_tmp_dma;
617 u32 i, req_len, resp_len;
618 const u32 max_resp_len = SB_RFB_MAX;
620 if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
621 mv_dprintk("Have not enough regiset for dev %d.\n",
625 slot = &mvi->slot_info[tag];
626 slot->tx = mvi->tx_prod;
627 del_q = TXQ_MODE_I | tag |
628 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
629 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
630 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
631 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
633 if (task->data_dir == DMA_FROM_DEVICE)
634 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
636 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
638 if (task->ata_task.use_ncq)
640 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
641 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
645 /* FIXME: fill in port multiplier number */
647 hdr->flags = cpu_to_le32(flags);
	/* FIXME: the low-order 5 bits hold the TAG if NCQ is enabled */
650 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
651 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
655 hdr->tags = cpu_to_le32(hdr_tag);
657 hdr->data_len = cpu_to_le32(task->total_xfer_len);
660 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
663 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
664 buf_cmd = buf_tmp = slot->buf;
665 buf_tmp_dma = slot->buf_dma;
667 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
669 buf_tmp += MVS_ATA_CMD_SZ;
670 buf_tmp_dma += MVS_ATA_CMD_SZ;
672 slot->cmd_size = MVS_ATA_CMD_SZ;
675 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
676 /* used for STP. unused for SATA? */
678 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
680 buf_tmp += MVS_OAF_SZ;
681 buf_tmp_dma += MVS_OAF_SZ;
683 /* region 3: PRD table ********************************************* */
687 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
690 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
696 /* FIXME: probably unused, for SATA. kept here just in case
697 * we get a STP/SATA error information record
699 slot->response = buf_tmp;
700 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
701 if (mvi->flags & MVF_FLAG_SOC)
702 hdr->reserved[0] = 0;
704 req_len = sizeof(struct host_to_dev_fis);
705 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
706 sizeof(struct mvs_err_info) - i;
708 /* request, response lengths */
709 resp_len = min(resp_len, max_resp_len);
710 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
712 if (likely(!task->ata_task.device_control_reg_update))
713 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
714 /* fill in command FIS and ATAPI CDB */
715 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
716 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
717 memcpy(buf_cmd + STP_ATAPI_CMD,
718 task->ata_task.atapi_packet, 16);
720 /* generate open address frame hdr (first 12 bytes) */
721 /* initiator, STP, ftype 1h */
722 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
723 buf_oaf[1] = dev->linkrate & 0xf;
724 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
725 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
727 /* fill in PRD (scatter/gather) table, if any */
728 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
730 if (task->data_dir == DMA_FROM_DEVICE)
731 MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
732 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
737 static int mvs_task_prep_ssp(struct mvs_info *mvi,
738 struct mvs_task_exec_info *tei, int is_tmf,
739 struct mvs_tmf_task *tmf)
741 struct sas_task *task = tei->task;
742 struct mvs_cmd_hdr *hdr = tei->hdr;
743 struct mvs_port *port = tei->port;
744 struct domain_device *dev = task->dev;
745 struct mvs_device *mvi_dev = dev->lldd_dev;
746 struct asd_sas_port *sas_port = dev->port;
747 struct mvs_slot_info *slot;
749 struct ssp_frame_hdr *ssp_hdr;
751 u8 *buf_cmd, *buf_oaf, fburst = 0;
752 dma_addr_t buf_tmp_dma;
754 u32 resp_len, req_len, i, tag = tei->tag;
755 const u32 max_resp_len = SB_RFB_MAX;
758 slot = &mvi->slot_info[tag];
760 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
761 sas_port->phy_mask) & TXQ_PHY_MASK;
763 slot->tx = mvi->tx_prod;
764 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
765 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
766 (phy_mask << TXQ_PHY_SHIFT));
769 if (task->ssp_task.enable_first_burst) {
774 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
775 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
776 hdr->tags = cpu_to_le32(tag);
777 hdr->data_len = cpu_to_le32(task->total_xfer_len);
780 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
783 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
784 buf_cmd = buf_tmp = slot->buf;
785 buf_tmp_dma = slot->buf_dma;
787 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
789 buf_tmp += MVS_SSP_CMD_SZ;
790 buf_tmp_dma += MVS_SSP_CMD_SZ;
792 slot->cmd_size = MVS_SSP_CMD_SZ;
795 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
797 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
799 buf_tmp += MVS_OAF_SZ;
800 buf_tmp_dma += MVS_OAF_SZ;
802 /* region 3: PRD table ********************************************* */
805 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
809 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
814 slot->response = buf_tmp;
815 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
816 if (mvi->flags & MVF_FLAG_SOC)
817 hdr->reserved[0] = 0;
819 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
820 sizeof(struct mvs_err_info) - i;
821 resp_len = min(resp_len, max_resp_len);
823 req_len = sizeof(struct ssp_frame_hdr) + 28;
825 /* request, response lengths */
826 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
828 /* generate open address frame hdr (first 12 bytes) */
829 /* initiator, SSP, ftype 1h */
830 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
831 buf_oaf[1] = dev->linkrate & 0xf;
832 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
833 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
835 /* fill in SSP frame header (Command Table.SSP frame header) */
836 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
839 ssp_hdr->frame_type = SSP_TASK;
841 ssp_hdr->frame_type = SSP_COMMAND;
843 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
844 HASHED_SAS_ADDR_SIZE);
845 memcpy(ssp_hdr->hashed_src_addr,
846 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
847 ssp_hdr->tag = cpu_to_be16(tag);
849 /* fill in IU for TASK and Command Frame */
850 buf_cmd += sizeof(*ssp_hdr);
851 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
853 if (ssp_hdr->frame_type != SSP_TASK) {
854 buf_cmd[9] = fburst | task->ssp_task.task_attr |
855 (task->ssp_task.task_prio << 3);
856 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
858 buf_cmd[10] = tmf->tmf;
863 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
865 tmf->tag_of_task_to_be_managed & 0xff;
871 /* fill in PRD (scatter/gather) table, if any */
872 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
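/*
 * Common command preparation: validate the device and port state, map
 * the scatterlist, allocate a slot tag and per-slot DMA buffer, then
 * dispatch to the SMP/SSP/ATA specific prep routine above and queue the
 * slot on the port. *pass tells the caller whether the delivery queue
 * write pointer needs to be kicked.
 */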
876 #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
877 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
878 struct mvs_tmf_task *tmf, int *pass)
880 struct domain_device *dev = task->dev;
881 struct mvs_device *mvi_dev = dev->lldd_dev;
882 struct mvs_task_exec_info tei;
883 struct mvs_slot_info *slot;
884 u32 tag = 0xdeadbeef, n_elem = 0;
888 struct task_status_struct *tsm = &task->task_status;
890 tsm->resp = SAS_TASK_UNDELIVERED;
891 tsm->stat = SAS_PHY_DOWN;
		 * libsas will use dev->port; we should
		 * not call task_done for SATA devices
896 if (dev->dev_type != SATA_DEV)
897 task->task_done(task);
901 if (DEV_IS_GONE(mvi_dev)) {
903 mv_dprintk("device %d not ready.\n",
906 mv_dprintk("device %016llx not ready.\n",
907 SAS_ADDR(dev->sas_addr));
912 tei.port = dev->port->lldd_port;
913 if (tei.port && !tei.port->port_attached && !tmf) {
914 if (sas_protocol_ata(task->task_proto)) {
915 struct task_status_struct *ts = &task->task_status;
916 mv_dprintk("SATA/STP port %d does not attach"
917 "device.\n", dev->port->id);
918 ts->resp = SAS_TASK_COMPLETE;
919 ts->stat = SAS_PHY_DOWN;
921 task->task_done(task);
924 struct task_status_struct *ts = &task->task_status;
925 mv_dprintk("SAS port %d does not attach"
926 "device.\n", dev->port->id);
927 ts->resp = SAS_TASK_UNDELIVERED;
928 ts->stat = SAS_PHY_DOWN;
929 task->task_done(task);
934 if (!sas_protocol_ata(task->task_proto)) {
935 if (task->num_scatter) {
936 n_elem = dma_map_sg(mvi->dev,
946 n_elem = task->num_scatter;
949 rc = mvs_tag_alloc(mvi, &tag);
953 slot = &mvi->slot_info[tag];
955 task->lldd_task = NULL;
956 slot->n_elem = n_elem;
957 slot->slot_tag = tag;
959 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
962 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
965 tei.hdr = &mvi->slot[tag];
968 switch (task->task_proto) {
969 case SAS_PROTOCOL_SMP:
970 rc = mvs_task_prep_smp(mvi, &tei);
972 case SAS_PROTOCOL_SSP:
973 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
975 case SAS_PROTOCOL_SATA:
976 case SAS_PROTOCOL_STP:
977 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
978 rc = mvs_task_prep_ata(mvi, &tei);
981 dev_printk(KERN_ERR, mvi->dev,
982 "unknown sas_task proto: 0x%x\n",
989 mv_dprintk("rc is %x\n", rc);
990 goto err_out_slot_buf;
993 slot->port = tei.port;
994 task->lldd_task = slot;
995 list_add_tail(&slot->entry, &tei.port->list);
996 spin_lock(&task->task_state_lock);
997 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
998 spin_unlock(&task->task_state_lock);
1000 mvs_hba_memory_dump(mvi, tag, task->task_proto);
1001 mvi_dev->running_req++;
1003 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1008 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1010 mvs_tag_free(mvi, tag);
1013 dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
1014 if (!sas_protocol_ata(task->task_proto))
1016 dma_unmap_sg(mvi->dev, task->scatter, n_elem,
1022 static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
1024 struct mvs_task_list *first = NULL;
1026 for (; *num > 0; --*num) {
1027 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
1032 INIT_LIST_HEAD(&mvs_list->list);
1036 list_add_tail(&mvs_list->list, &first->list);
1043 static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
1046 struct list_head *pos, *a;
1047 struct mvs_task_list *mlist = NULL;
1049 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
1051 list_for_each_safe(pos, a, &list) {
1053 mlist = list_entry(pos, struct mvs_task_list, list);
1054 kmem_cache_free(mvs_task_list_cache, mlist);
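/*
 * Queue one task: for a SATA device the ata port lock is dropped while
 * mvi->lock is held for preparation (presumably to avoid deadlocking
 * against libata), and the delivery queue write pointer is only kicked
 * if mvs_task_prep() actually produced a slot.
 */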
1058 static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
1059 struct completion *completion, int is_tmf,
1060 struct mvs_tmf_task *tmf)
1062 struct domain_device *dev = task->dev;
1063 struct mvs_info *mvi = NULL;
1066 unsigned long flags = 0;
1068 mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
1070 if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
1071 spin_unlock_irq(dev->sata_dev.ap->lock);
1073 spin_lock_irqsave(&mvi->lock, flags);
1074 rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
1076 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1079 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
1080 (MVS_CHIP_SLOT_SZ - 1));
1081 spin_unlock_irqrestore(&mvi->lock, flags);
1083 if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
1084 spin_lock_irq(dev->sata_dev.ap->lock);
1089 static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
1090 struct completion *completion, int is_tmf,
1091 struct mvs_tmf_task *tmf)
1093 struct domain_device *dev = task->dev;
1094 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
1095 struct mvs_info *mvi = NULL;
1096 struct sas_task *t = task;
1097 struct mvs_task_list *mvs_list = NULL, *a;
1102 unsigned long flags = 0;
1104 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
1106 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
1111 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
1113 list_for_each_entry(a, &q, list) {
1115 t = list_entry(t->list.next, struct sas_task, list);
1118 list_for_each_entry(a, &q , list) {
1121 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
1123 spin_lock_irqsave(&mvi->lock, flags);
1124 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
1126 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1127 spin_unlock_irqrestore(&mvi->lock, flags);
1130 if (likely(pass[0]))
1131 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
1132 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1134 if (likely(pass[1]))
1135 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
1136 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1142 mvs_task_free_list(mvs_list);
1147 int mvs_queue_command(struct sas_task *task, const int num,
1150 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1151 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
1153 if (sas->lldd_max_execute_num < 2)
1154 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1156 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1159 static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1161 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1162 mvs_tag_clear(mvi, slot_idx);
1165 static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1166 struct mvs_slot_info *slot, u32 slot_idx)
1170 if (!sas_protocol_ata(task->task_proto))
1172 dma_unmap_sg(mvi->dev, task->scatter,
1173 slot->n_elem, task->data_dir);
1175 switch (task->task_proto) {
1176 case SAS_PROTOCOL_SMP:
1177 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
1178 PCI_DMA_FROMDEVICE);
1179 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
1183 case SAS_PROTOCOL_SATA:
1184 case SAS_PROTOCOL_STP:
1185 case SAS_PROTOCOL_SSP:
1192 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1195 list_del_init(&slot->entry);
1196 task->lldd_task = NULL;
1199 slot->slot_tag = 0xFFFFFFFF;
1200 mvs_slot_free(mvi, slot_idx);
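/*
 * Reprogram the wide-port configuration registers of every phy that is
 * a member of this port so they all share the current wide-port phy
 * map (or get cleared when a member drops out).
 */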
1203 static void mvs_update_wideport(struct mvs_info *mvi, int i)
1205 struct mvs_phy *phy = &mvi->phy[i];
1206 struct mvs_port *port = phy->port;
1209 for_each_phy(port->wide_port_phymap, j, no) {
1211 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1213 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1214 port->wide_port_phymap);
1216 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1218 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1224 static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1227 struct mvs_phy *phy = &mvi->phy[i];
1228 struct mvs_port *port = phy->port;
1230 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
1231 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
1233 phy->phy_attached = 1;
1238 if (phy->phy_type & PORT_TYPE_SAS) {
1239 port->wide_port_phymap &= ~(1U << i);
1240 if (!port->wide_port_phymap)
1241 port->port_attached = 0;
1242 mvs_update_wideport(mvi, i);
1243 } else if (phy->phy_type & PORT_TYPE_SATA)
1244 port->port_attached = 0;
1246 phy->phy_attached = 0;
1247 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1252 static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1254 u32 *s = (u32 *) buf;
1259 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1260 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1262 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1263 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1265 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1266 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1268 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1269 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1271 /* Workaround: take some ATAPI devices for ATA */
1272 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1273 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1278 static u32 mvs_is_sig_fis_received(u32 irq_status)
1280 return irq_status & PHYEV_SIG_FIS;
1283 static void mvs_sig_remove_timer(struct mvs_phy *phy)
1285 if (phy->timer.function)
1286 del_timer(&phy->timer);
1287 phy->timer.function = NULL;
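/*
 * Refresh cached phy state after a port interrupt: read the irq status,
 * check phy readiness, and fill in the identify/attached-device info.
 * For SATA phys the link only counts as attached once the signature FIS
 * has been received; otherwise the SIG FIS interrupt is re-armed.
 */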
1290 void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1292 struct mvs_phy *phy = &mvi->phy[i];
1293 struct sas_identify_frame *id;
1295 id = (struct sas_identify_frame *)phy->frame_rcvd;
1298 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1299 phy->phy_status = mvs_is_phy_ready(mvi, i);
1302 if (phy->phy_status) {
1304 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1306 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1308 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1309 if (phy->phy_type & PORT_TYPE_SATA) {
1310 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1311 if (mvs_is_sig_fis_received(phy->irq_status)) {
1312 mvs_sig_remove_timer(phy);
1313 phy->phy_attached = 1;
1314 phy->att_dev_sas_addr =
1315 i + mvi->id * mvi->chip->n_phy;
1317 sas_phy->oob_mode = SATA_OOB_MODE;
1318 phy->frame_rcvd_size =
1319 sizeof(struct dev_to_host_fis);
1320 mvs_get_d2h_reg(mvi, i, id);
1323 dev_printk(KERN_DEBUG, mvi->dev,
1324 "Phy%d : No sig fis\n", i);
1325 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1326 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1327 tmp | PHYEV_SIG_FIS);
1328 phy->phy_attached = 0;
1329 phy->phy_type &= ~PORT_TYPE_SATA;
1332 } else if (phy->phy_type & PORT_TYPE_SAS
1333 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1334 phy->phy_attached = 1;
1335 phy->identify.device_type =
1336 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1338 if (phy->identify.device_type == SAS_END_DEV)
1339 phy->identify.target_port_protocols =
1341 else if (phy->identify.device_type != NO_DEVICE)
1342 phy->identify.target_port_protocols =
1345 sas_phy->oob_mode = SAS_OOB_MODE;
1346 phy->frame_rcvd_size =
1347 sizeof(struct sas_identify_frame);
1349 memcpy(sas_phy->attached_sas_addr,
1350 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1352 if (MVS_CHIP_DISP->phy_work_around)
1353 MVS_CHIP_DISP->phy_work_around(mvi, i);
1355 mv_dprintk("port %d attach dev info is %x\n",
1356 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1357 mv_dprintk("port %d attach sas addr is %llx\n",
1358 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1361 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1364 static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1366 struct sas_ha_struct *sas_ha = sas_phy->ha;
1367 struct mvs_info *mvi = NULL; int i = 0, hi;
1368 struct mvs_phy *phy = sas_phy->lldd_phy;
1369 struct asd_sas_port *sas_port = sas_phy->port;
1370 struct mvs_port *port;
1371 unsigned long flags = 0;
1375 while (sas_ha->sas_phy[i]) {
1376 if (sas_ha->sas_phy[i] == sas_phy)
1380 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1381 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1382 if (sas_port->id >= mvi->chip->n_phy)
1383 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1385 port = &mvi->port[sas_port->id];
1387 spin_lock_irqsave(&mvi->lock, flags);
1388 port->port_attached = 1;
1390 sas_port->lldd_port = port;
1391 if (phy->phy_type & PORT_TYPE_SAS) {
1392 port->wide_port_phymap = sas_port->phy_mask;
1393 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1394 mvs_update_wideport(mvi, sas_phy->id);
1397 spin_unlock_irqrestore(&mvi->lock, flags);
1400 static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1402 struct domain_device *dev;
1403 struct mvs_phy *phy = sas_phy->lldd_phy;
1404 struct mvs_info *mvi = phy->mvi;
1405 struct asd_sas_port *port = sas_phy->port;
1408 while (phy != &mvi->phy[phy_no]) {
1410 if (phy_no >= MVS_MAX_PHYS)
1413 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1414 mvs_do_release_task(phy->mvi, phy_no, NULL);
1419 void mvs_port_formed(struct asd_sas_phy *sas_phy)
1421 mvs_port_notify_formed(sas_phy, 1);
1424 void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1426 mvs_port_notify_deformed(sas_phy, 1);
1429 struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1432 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1433 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1434 mvi->devices[dev].device_id = dev;
1435 return &mvi->devices[dev];
1439 if (dev == MVS_MAX_DEVICES)
1440 mv_printk("max support %d devices, ignore ..\n",
1446 void mvs_free_dev(struct mvs_device *mvi_dev)
1448 u32 id = mvi_dev->device_id;
1449 memset(mvi_dev, 0, sizeof(*mvi_dev));
1450 mvi_dev->device_id = id;
1451 mvi_dev->dev_type = NO_DEVICE;
1452 mvi_dev->dev_status = MVS_DEV_NORMAL;
1453 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1456 int mvs_dev_found_notify(struct domain_device *dev, int lock)
1458 unsigned long flags = 0;
1460 struct mvs_info *mvi = NULL;
1461 struct domain_device *parent_dev = dev->parent;
1462 struct mvs_device *mvi_device;
1464 mvi = mvs_find_dev_mvi(dev);
1467 spin_lock_irqsave(&mvi->lock, flags);
1469 mvi_device = mvs_alloc_dev(mvi);
1474 dev->lldd_dev = mvi_device;
1475 mvi_device->dev_status = MVS_DEV_NORMAL;
1476 mvi_device->dev_type = dev->dev_type;
1477 mvi_device->mvi_info = mvi;
1478 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1480 u8 phy_num = parent_dev->ex_dev.num_phys;
1482 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1483 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1484 if (SAS_ADDR(phy->attached_sas_addr) ==
1485 SAS_ADDR(dev->sas_addr)) {
1486 mvi_device->attached_phy = phy_id;
1491 if (phy_id == phy_num) {
1492 mv_printk("Error: no attached dev:%016llx"
1494 SAS_ADDR(dev->sas_addr),
1495 SAS_ADDR(parent_dev->sas_addr));
1502 spin_unlock_irqrestore(&mvi->lock, flags);
1506 int mvs_dev_found(struct domain_device *dev)
1508 return mvs_dev_found_notify(dev, 1);
1511 void mvs_dev_gone_notify(struct domain_device *dev)
1513 unsigned long flags = 0;
1514 struct mvs_device *mvi_dev = dev->lldd_dev;
1515 struct mvs_info *mvi = mvi_dev->mvi_info;
1517 spin_lock_irqsave(&mvi->lock, flags);
1520 mv_dprintk("found dev[%d:%x] is gone.\n",
1521 mvi_dev->device_id, mvi_dev->dev_type);
1522 mvs_release_task(mvi, dev);
1523 mvs_free_reg_set(mvi, mvi_dev);
1524 mvs_free_dev(mvi_dev);
1526 mv_dprintk("found dev has gone.\n");
1528 dev->lldd_dev = NULL;
1530 spin_unlock_irqrestore(&mvi->lock, flags);
1534 void mvs_dev_gone(struct domain_device *dev)
1536 mvs_dev_gone_notify(dev);
1539 static struct sas_task *mvs_alloc_task(void)
1541 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1544 INIT_LIST_HEAD(&task->list);
1545 spin_lock_init(&task->task_state_lock);
1546 task->task_state_flags = SAS_TASK_STATE_PENDING;
1547 init_timer(&task->timer);
1548 init_completion(&task->completion);
1553 static void mvs_free_task(struct sas_task *task)
1556 BUG_ON(!list_empty(&task->list));
1561 static void mvs_task_done(struct sas_task *task)
1563 if (!del_timer(&task->timer))
1565 complete(&task->completion);
1568 static void mvs_tmf_timedout(unsigned long data)
1570 struct sas_task *task = (struct sas_task *)data;
1572 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1573 complete(&task->completion);
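/*
 * Issue an internal SSP task management function: build a sas_task with
 * a completion timer, push it through the normal queueing path, and
 * interpret the response, retrying up to three times.
 */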
1577 #define MVS_TASK_TIMEOUT 20
1578 static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1579 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1582 struct sas_task *task = NULL;
1584 for (retry = 0; retry < 3; retry++) {
1585 task = mvs_alloc_task();
1590 task->task_proto = dev->tproto;
1592 memcpy(&task->ssp_task, parameter, para_len);
1593 task->task_done = mvs_task_done;
1595 task->timer.data = (unsigned long) task;
1596 task->timer.function = mvs_tmf_timedout;
1597 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1598 add_timer(&task->timer);
1600 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1603 del_timer(&task->timer);
1604 mv_printk("executing internel task failed:%d\n", res);
1608 wait_for_completion(&task->completion);
1609 res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
1611 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1612 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1613 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1618 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1619 task->task_status.stat == SAM_STAT_GOOD) {
1620 res = TMF_RESP_FUNC_COMPLETE;
1624 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1625 task->task_status.stat == SAS_DATA_UNDERRUN) {
1626 /* no error, but return the number of bytes of
1628 res = task->task_status.residual;
1632 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1633 task->task_status.stat == SAS_DATA_OVERRUN) {
1634 mv_dprintk("blocked task error.\n");
1638 mv_dprintk(" task to dev %016llx response: 0x%x "
1640 SAS_ADDR(dev->sas_addr),
1641 task->task_status.resp,
1642 task->task_status.stat);
1643 mvs_free_task(task);
1649 BUG_ON(retry == 3 && task != NULL);
1651 mvs_free_task(task);
1655 static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1656 u8 *lun, struct mvs_tmf_task *tmf)
1658 struct sas_ssp_task ssp_task;
1659 DECLARE_COMPLETION_ONSTACK(completion);
1660 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1661 return TMF_RESP_FUNC_ESUPP;
1663 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1665 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1666 sizeof(ssp_task), tmf);
/* Standard mandates link reset for ATA (type 0)
   and hard reset for SSP (type 1), only for RECOVERY */
1672 static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1675 struct sas_phy *phy = sas_find_local_phy(dev);
1676 int reset_type = (dev->dev_type == SATA_DEV ||
1677 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1678 rc = sas_phy_reset(phy, reset_type);
1683 /* mandatory SAM-3 */
1684 int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1686 unsigned long flags;
1687 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1688 struct mvs_tmf_task tmf_task;
1689 struct mvs_device * mvi_dev = dev->lldd_dev;
1690 struct mvs_info *mvi = mvi_dev->mvi_info;
1692 tmf_task.tmf = TMF_LU_RESET;
1693 mvi_dev->dev_status = MVS_DEV_EH;
1694 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1695 if (rc == TMF_RESP_FUNC_COMPLETE) {
1696 num = mvs_find_dev_phyno(dev, phyno);
1697 spin_lock_irqsave(&mvi->lock, flags);
1698 for (i = 0; i < num; i++)
1699 mvs_release_task(mvi, dev);
1700 spin_unlock_irqrestore(&mvi->lock, flags);
	/* If failed, fall through to I_T_Nexus reset */
1703 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1704 mvi_dev->device_id, rc);
1708 int mvs_I_T_nexus_reset(struct domain_device *dev)
1710 unsigned long flags;
1711 int rc = TMF_RESP_FUNC_FAILED;
1712 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1713 struct mvs_info *mvi = mvi_dev->mvi_info;
1715 if (mvi_dev->dev_status != MVS_DEV_EH)
1716 return TMF_RESP_FUNC_COMPLETE;
1717 rc = mvs_debug_I_T_nexus_reset(dev);
1718 mv_printk("%s for device[%x]:rc= %d\n",
1719 __func__, mvi_dev->device_id, rc);
1722 spin_lock_irqsave(&mvi->lock, flags);
1723 mvs_release_task(mvi, dev);
1724 spin_unlock_irqrestore(&mvi->lock, flags);
1728 /* optional SAM-3 */
1729 int mvs_query_task(struct sas_task *task)
1732 struct scsi_lun lun;
1733 struct mvs_tmf_task tmf_task;
1734 int rc = TMF_RESP_FUNC_FAILED;
1736 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1737 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1738 struct domain_device *dev = task->dev;
1739 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1740 struct mvs_info *mvi = mvi_dev->mvi_info;
1742 int_to_scsilun(cmnd->device->lun, &lun);
1743 rc = mvs_find_tag(mvi, task, &tag);
1745 rc = TMF_RESP_FUNC_FAILED;
1749 tmf_task.tmf = TMF_QUERY_TASK;
1750 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1752 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1754 /* The task is still in Lun, release it then */
1755 case TMF_RESP_FUNC_SUCC:
1756 /* The task is not in Lun or failed, reset the phy */
1757 case TMF_RESP_FUNC_FAILED:
1758 case TMF_RESP_FUNC_COMPLETE:
1761 rc = TMF_RESP_FUNC_COMPLETE;
1765 mv_printk("%s:rc= %d\n", __func__, rc);
/* mandatory SAM-3, still need to free task/slot info */
1770 int mvs_abort_task(struct sas_task *task)
1772 struct scsi_lun lun;
1773 struct mvs_tmf_task tmf_task;
1774 struct domain_device *dev = task->dev;
1775 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1776 struct mvs_info *mvi;
1777 int rc = TMF_RESP_FUNC_FAILED;
1778 unsigned long flags;
1782 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
1783 rc = TMF_RESP_FUNC_FAILED;
1786 mvi = mvi_dev->mvi_info;
1788 spin_lock_irqsave(&task->task_state_lock, flags);
1789 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1790 spin_unlock_irqrestore(&task->task_state_lock, flags);
1791 rc = TMF_RESP_FUNC_COMPLETE;
1794 spin_unlock_irqrestore(&task->task_state_lock, flags);
1795 mvi_dev->dev_status = MVS_DEV_EH;
1796 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1797 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1799 int_to_scsilun(cmnd->device->lun, &lun);
1800 rc = mvs_find_tag(mvi, task, &tag);
1802 mv_printk("No such tag in %s\n", __func__);
1803 rc = TMF_RESP_FUNC_FAILED;
1807 tmf_task.tmf = TMF_ABORT_TASK;
1808 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1810 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1812 /* if successful, clear the task and callback forwards.*/
1813 if (rc == TMF_RESP_FUNC_COMPLETE) {
1815 struct mvs_slot_info *slot;
1817 if (task->lldd_task) {
1818 slot = task->lldd_task;
1819 slot_no = (u32) (slot - mvi->slot_info);
1820 spin_lock_irqsave(&mvi->lock, flags);
1821 mvs_slot_complete(mvi, slot_no, 1);
1822 spin_unlock_irqrestore(&mvi->lock, flags);
1826 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1827 task->task_proto & SAS_PROTOCOL_STP) {
		/* TODO: free register set */
1829 if (SATA_DEV == dev->dev_type) {
1830 struct mvs_slot_info *slot = task->lldd_task;
1831 struct task_status_struct *tstat;
1832 u32 slot_idx = (u32)(slot - mvi->slot_info);
1833 tstat = &task->task_status;
			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1835 "slot=%p slot_idx=x%x\n",
1836 mvi, task, slot, slot_idx);
1837 tstat->stat = SAS_ABORTED_TASK;
1838 if (mvi_dev && mvi_dev->running_req)
1839 mvi_dev->running_req--;
1840 if (sas_protocol_ata(task->task_proto))
1841 mvs_free_reg_set(mvi, mvi_dev);
1842 mvs_slot_task_free(mvi, task, slot, slot_idx);
1850 if (rc != TMF_RESP_FUNC_COMPLETE)
1851 mv_printk("%s:rc= %d\n", __func__, rc);
1855 int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1857 int rc = TMF_RESP_FUNC_FAILED;
1858 struct mvs_tmf_task tmf_task;
1860 tmf_task.tmf = TMF_ABORT_TASK_SET;
1861 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1866 int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1868 int rc = TMF_RESP_FUNC_FAILED;
1869 struct mvs_tmf_task tmf_task;
1871 tmf_task.tmf = TMF_CLEAR_ACA;
1872 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1877 int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1879 int rc = TMF_RESP_FUNC_FAILED;
1880 struct mvs_tmf_task tmf_task;
1882 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1883 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1888 static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1889 u32 slot_idx, int err)
1891 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1892 struct task_status_struct *tstat = &task->task_status;
1893 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1894 int stat = SAM_STAT_GOOD;
1897 resp->frame_len = sizeof(struct dev_to_host_fis);
1898 memcpy(&resp->ending_fis[0],
1899 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1900 sizeof(struct dev_to_host_fis));
1901 tstat->buf_valid_size = sizeof(*resp);
1902 if (unlikely(err)) {
1903 if (unlikely(err & CMD_ISS_STPD))
1904 stat = SAS_OPEN_REJECT;
1906 stat = SAS_PROTO_RESPONSE;
1912 static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1915 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1917 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1919 enum mvs_port_type type = PORT_TYPE_SAS;
1921 if (err_dw0 & CMD_ISS_STPD)
1922 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1924 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1926 stat = SAM_STAT_CHECK_CONDITION;
1927 switch (task->task_proto) {
1928 case SAS_PROTOCOL_SSP:
1929 stat = SAS_ABORTED_TASK;
1931 case SAS_PROTOCOL_SMP:
1932 stat = SAM_STAT_CHECK_CONDITION;
1935 case SAS_PROTOCOL_SATA:
1936 case SAS_PROTOCOL_STP:
1937 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1939 if (err_dw0 == 0x80400002)
1940 mv_printk("find reserved error, why?\n");
1942 task->ata_task.use_ncq = 0;
1943 mvs_sata_done(mvi, task, slot_idx, err_dw0);
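/*
 * Completion handler for one RX descriptor: look up the slot and task,
 * translate the hardware status (error record, response frame, D2H FIS)
 * into a libsas task_status, free the slot resources and invoke
 * task->task_done().
 */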
1953 int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1955 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1956 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1957 struct sas_task *task = slot->task;
1958 struct mvs_device *mvi_dev = NULL;
1959 struct task_status_struct *tstat;
1960 struct domain_device *dev;
1964 enum exec_status sts;
1968 if (unlikely(!task || !task->lldd_task || !task->dev))
1971 tstat = &task->task_status;
1973 mvi_dev = dev->lldd_dev;
1975 mvs_hba_cq_dump(mvi);
1977 spin_lock(&task->task_state_lock);
1978 task->task_state_flags &=
1979 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1980 task->task_state_flags |= SAS_TASK_STATE_DONE;
1982 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1983 spin_unlock(&task->task_state_lock);
1985 memset(tstat, 0, sizeof(*tstat));
1986 tstat->resp = SAS_TASK_COMPLETE;
1988 if (unlikely(aborted)) {
1989 tstat->stat = SAS_ABORTED_TASK;
1990 if (mvi_dev && mvi_dev->running_req)
1991 mvi_dev->running_req--;
1992 if (sas_protocol_ata(task->task_proto))
1993 mvs_free_reg_set(mvi, mvi_dev);
1995 mvs_slot_task_free(mvi, task, slot, slot_idx);
1999 if (unlikely(!mvi_dev || flags)) {
2001 mv_dprintk("port has not device.\n");
2002 tstat->stat = SAS_PHY_DOWN;
2006 /* error info record present */
2007 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
2008 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
2009 tstat->resp = SAS_TASK_COMPLETE;
2013 switch (task->task_proto) {
2014 case SAS_PROTOCOL_SSP:
2015 /* hw says status == 0, datapres == 0 */
2016 if (rx_desc & RXQ_GOOD) {
2017 tstat->stat = SAM_STAT_GOOD;
2018 tstat->resp = SAS_TASK_COMPLETE;
2020 /* response frame present */
2021 else if (rx_desc & RXQ_RSP) {
2022 struct ssp_response_iu *iu = slot->response +
2023 sizeof(struct mvs_err_info);
2024 sas_ssp_task_response(mvi->dev, task, iu);
2026 tstat->stat = SAM_STAT_CHECK_CONDITION;
2029 case SAS_PROTOCOL_SMP: {
2030 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
2031 tstat->stat = SAM_STAT_GOOD;
2032 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
2033 memcpy(to + sg_resp->offset,
2034 slot->response + sizeof(struct mvs_err_info),
2035 sg_dma_len(sg_resp));
2036 kunmap_atomic(to, KM_IRQ0);
2040 case SAS_PROTOCOL_SATA:
2041 case SAS_PROTOCOL_STP:
2042 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
2043 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
2048 tstat->stat = SAM_STAT_CHECK_CONDITION;
2051 if (!slot->port->port_attached) {
2052 mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
2053 tstat->stat = SAS_PHY_DOWN;
2058 if (mvi_dev && mvi_dev->running_req) {
2059 mvi_dev->running_req--;
2060 if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
2061 mvs_free_reg_set(mvi, mvi_dev);
2063 mvs_slot_task_free(mvi, task, slot, slot_idx);
2066 spin_unlock(&mvi->lock);
2067 if (task->task_done)
2068 task->task_done(task);
2070 mv_dprintk("why has not task_done.\n");
2071 spin_lock(&mvi->lock);
2076 void mvs_do_release_task(struct mvs_info *mvi,
2077 int phy_no, struct domain_device *dev)
2080 struct mvs_phy *phy;
2081 struct mvs_port *port;
2082 struct mvs_slot_info *slot, *slot2;
2084 phy = &mvi->phy[phy_no];
2088 /* clean cmpl queue in case request is already finished */
2089 mvs_int_rx(mvi, false);
2093 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
2094 struct sas_task *task;
2095 slot_idx = (u32) (slot - mvi->slot_info);
2098 if (dev && task->dev != dev)
2101 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
2102 slot_idx, slot->slot_tag, task);
2103 MVS_CHIP_DISP->command_active(mvi, slot_idx);
2105 mvs_slot_complete(mvi, slot_idx, 1);
2109 void mvs_release_task(struct mvs_info *mvi,
2110 struct domain_device *dev)
2112 int i, phyno[WIDE_PORT_MAX_PHY], num;
2114 num = mvs_find_dev_phyno(dev, phyno);
2115 for (i = 0; i < num; i++)
2116 mvs_do_release_task(mvi, phyno[i], dev);
2119 static void mvs_phy_disconnected(struct mvs_phy *phy)
2121 phy->phy_attached = 0;
2122 phy->att_dev_info = 0;
2123 phy->att_dev_sas_addr = 0;
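/*
 * Deferred hot-plug handling: phy plug in/out events detected in the
 * interrupt path are queued as delayed work (see mvs_handle_event());
 * mvs_work_queue() then notifies libsas of the loss of signal or of the
 * newly attached device.
 */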
2126 static void mvs_work_queue(struct work_struct *work)
2128 struct delayed_work *dw = container_of(work, struct delayed_work, work);
2129 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2130 struct mvs_info *mvi = mwq->mvi;
2131 unsigned long flags;
2133 spin_lock_irqsave(&mvi->lock, flags);
2134 if (mwq->handler & PHY_PLUG_EVENT) {
2135 u32 phy_no = (unsigned long) mwq->data;
2136 struct sas_ha_struct *sas_ha = mvi->sas;
2137 struct mvs_phy *phy = &mvi->phy[phy_no];
2138 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2140 if (phy->phy_event & PHY_PLUG_OUT) {
2142 struct sas_identify_frame *id;
2143 id = (struct sas_identify_frame *)phy->frame_rcvd;
2144 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
2145 phy->phy_event &= ~PHY_PLUG_OUT;
2146 if (!(tmp & PHY_READY_MASK)) {
2147 sas_phy_disconnected(sas_phy);
2148 mvs_phy_disconnected(phy);
2149 sas_ha->notify_phy_event(sas_phy,
2150 PHYE_LOSS_OF_SIGNAL);
2151 mv_dprintk("phy%d Removed Device\n", phy_no);
2153 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2154 mvs_update_phyinfo(mvi, phy_no, 1);
2155 mvs_bytes_dmaed(mvi, phy_no);
2156 mvs_port_notify_formed(sas_phy, 0);
2157 mv_dprintk("phy%d Attached Device\n", phy_no);
2161 list_del(&mwq->entry);
2162 spin_unlock_irqrestore(&mvi->lock, flags);
2166 static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
2171 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
2175 mwq->handler = handler;
2176 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
2177 list_add_tail(&mwq->entry, &mvi->wq_list);
2178 schedule_delayed_work(&mwq->work_q, HZ * 2);
2185 static void mvs_sig_time_out(unsigned long tphy)
2187 struct mvs_phy *phy = (struct mvs_phy *)tphy;
2188 struct mvs_info *mvi = phy->mvi;
2191 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
2192 if (&mvi->phy[phy_no] == phy) {
2193 mv_dprintk("Get signature time out, reset phy %d\n",
2194 phy_no+mvi->id*mvi->chip->n_phy);
2195 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2200 void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2203 struct sas_ha_struct *sas_ha = mvi->sas;
2204 struct mvs_phy *phy = &mvi->phy[phy_no];
2205 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2207 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2208 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2209 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2210 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
	* 'events' now carries the port event; we need to check the
	* interrupt status that belongs to each port.
2218 if (phy->irq_status & PHYEV_DCDR_ERR) {
2219 mv_dprintk("port %d STP decoding error.\n",
2220 phy_no + mvi->id*mvi->chip->n_phy);
2223 if (phy->irq_status & PHYEV_POOF) {
2224 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2225 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2227 mvs_do_release_task(mvi, phy_no, NULL);
2228 phy->phy_event |= PHY_PLUG_OUT;
2229 MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
2230 mvs_handle_event(mvi,
2231 (void *)(unsigned long)phy_no,
2233 ready = mvs_is_phy_ready(mvi, phy_no);
2235 mv_dprintk("phy%d Unplug Notice\n",
2237 mvi->id * mvi->chip->n_phy);
2238 if (ready || dev_sata) {
2239 if (MVS_CHIP_DISP->stp_reset)
2240 MVS_CHIP_DISP->stp_reset(mvi,
2243 MVS_CHIP_DISP->phy_reset(mvi,
2250 if (phy->irq_status & PHYEV_COMWAKE) {
2251 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2252 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2253 tmp | PHYEV_SIG_FIS);
2254 if (phy->timer.function == NULL) {
2255 phy->timer.data = (unsigned long)phy;
2256 phy->timer.function = mvs_sig_time_out;
2257 phy->timer.expires = jiffies + 10*HZ;
2258 add_timer(&phy->timer);
2261 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2262 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2263 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2264 if (phy->phy_status) {
2266 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2267 if (phy->phy_type & PORT_TYPE_SATA) {
2268 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2270 tmp &= ~PHYEV_SIG_FIS;
2271 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2274 mvs_update_phyinfo(mvi, phy_no, 0);
2275 if (phy->phy_type & PORT_TYPE_SAS) {
2276 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
2280 mvs_bytes_dmaed(mvi, phy_no);
2281 /* whether driver is going to handle hot plug */
2282 if (phy->phy_event & PHY_PLUG_OUT) {
2283 mvs_port_notify_formed(sas_phy, 0);
2284 phy->phy_event &= ~PHY_PLUG_OUT;
2287 mv_dprintk("plugin interrupt but phy%d is gone\n",
2288 phy_no + mvi->id*mvi->chip->n_phy);
2290 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2291 mv_dprintk("port %d broadcast change.\n",
2292 phy_no + mvi->id*mvi->chip->n_phy);
2293 /* exception for Samsung disk drive*/
2295 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2297 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2300 int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
2302 u32 rx_prod_idx, rx_desc;
2305 /* the first dword in the RX ring is special: it contains
2306 * a mirror of the hardware's RX producer index, so that
2307 * we don't have to stall the CPU reading that register.
2308 * The actual RX ring is offset by one dword, due to this.
2310 rx_prod_idx = mvi->rx_cons;
2311 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2312 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
	/* The CMPL_Q may come late; read from the register and try again.
	 * Note: if coalescing is enabled, we will need to read from the
	 * register every time.
2319 if (unlikely(mvi->rx_cons == rx_prod_idx))
2320 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2322 if (mvi->rx_cons == rx_prod_idx)
2325 while (mvi->rx_cons != rx_prod_idx) {
2326 /* increment our internal RX consumer pointer */
2327 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2328 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2330 if (likely(rx_desc & RXQ_DONE))
2331 mvs_slot_complete(mvi, rx_desc, 0);
2332 if (rx_desc & RXQ_ATTN) {
2334 } else if (rx_desc & RXQ_ERR) {
2335 if (!(rx_desc & RXQ_DONE))
2336 mvs_slot_complete(mvi, rx_desc, 0);
2337 } else if (rx_desc & RXQ_SLOT_RESET) {
2338 mvs_slot_free(mvi, rx_desc);
2342 if (attn && self_clear)
2343 MVS_CHIP_DISP->int_full(mvi);