1 /* Copyright 2012 STEC, Inc.
3 * This file is licensed under the terms of the 3-clause
4 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
5 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
6 * at your option. Both licenses are also available in the LICENSE file
7 * distributed with this project. This file may not be copied, modified,
8 * or distributed except in accordance with those terms.
9 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
10 * Initial Driver Design!
11 * Thomas Swann <tswann@stec-inc.com>
13 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
14 * biomode implementation.
15 * Akhil Bhansali <abhansali@stec-inc.com>
16 * Added support for DISCARD / FLUSH and FUA.
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/blkdev.h>
26 #include <linux/sched.h>
27 #include <linux/interrupt.h>
28 #include <linux/compiler.h>
29 #include <linux/workqueue.h>
30 #include <linux/bitops.h>
31 #include <linux/delay.h>
32 #include <linux/time.h>
33 #include <linux/hdreg.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/completion.h>
36 #include <linux/scatterlist.h>
37 #include <linux/version.h>
38 #include <linux/err.h>
39 #include <linux/scatterlist.h>
40 #include <linux/aer.h>
41 #include <linux/ctype.h>
42 #include <linux/wait.h>
43 #include <linux/uio.h>
44 #include <scsi/scsi.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
50 #include "skd_s1120.h"
52 static int skd_dbg_level;
53 static int skd_isr_comp_limit = 4;
59 STEC_LINK_UNKNOWN = 0xFF
63 SKD_FLUSH_INITIALIZER,
64 SKD_FLUSH_ZERO_SIZE_FIRST,
65 SKD_FLUSH_DATA_SECOND,
68 #define SKD_ASSERT(expr) \
70 if (unlikely(!(expr))) { \
71 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
72 # expr, __FILE__, __func__, __LINE__); \
76 #define DRV_NAME "skd"
77 #define DRV_VERSION "2.2.1"
78 #define DRV_BUILD_ID "0260"
79 #define PFX DRV_NAME ": "
80 #define DRV_BIN_VERSION 0x100
81 #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
83 MODULE_AUTHOR("bug-reports: support@stec-inc.com");
84 MODULE_LICENSE("Dual BSD/GPL");
86 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
87 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
89 #define PCI_VENDOR_ID_STEC 0x1B39
90 #define PCI_DEVICE_ID_S1120 0x0001
92 #define SKD_FUA_NV (1 << 1)
93 #define SKD_MINORS_PER_DEVICE 16
95 #define SKD_MAX_QUEUE_DEPTH 200u
97 #define SKD_PAUSE_TIMEOUT (5 * 1000)
99 #define SKD_N_FITMSG_BYTES (512u)
101 #define SKD_N_SPECIAL_CONTEXT 32u
102 #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
104 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
105 * 128KB limit. That allows 4096*4K = 16M xfer size
107 #define SKD_N_SG_PER_REQ_DEFAULT 256u
108 #define SKD_N_SG_PER_SPECIAL 256u
110 #define SKD_N_COMPLETION_ENTRY 256u
111 #define SKD_N_READ_CAP_BYTES (8u)
113 #define SKD_N_INTERNAL_BYTES (512u)
115 /* 5 bits of uniqifier, 0xF800 */
116 #define SKD_ID_INCR (0x400)
117 #define SKD_ID_TABLE_MASK (3u << 8u)
118 #define SKD_ID_RW_REQUEST (0u << 8u)
119 #define SKD_ID_INTERNAL (1u << 8u)
120 #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
121 #define SKD_ID_FIT_MSG (3u << 8u)
122 #define SKD_ID_SLOT_MASK 0x00FFu
123 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
125 #define SKD_N_TIMEOUT_SLOT 4u
126 #define SKD_TIMEOUT_SLOT_MASK 3u
128 #define SKD_N_MAX_SECTORS 2048u
130 #define SKD_MAX_RETRIES 2u
132 #define SKD_TIMER_SECONDS(seconds) (seconds)
133 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
135 #define INQ_STD_NBYTES 36
136 #define SKD_DISCARD_CDB_LENGTH 24
138 enum skd_drvr_state {
142 SKD_DRVR_STATE_STARTING,
143 SKD_DRVR_STATE_ONLINE,
144 SKD_DRVR_STATE_PAUSING,
145 SKD_DRVR_STATE_PAUSED,
146 SKD_DRVR_STATE_DRAINING_TIMEOUT,
147 SKD_DRVR_STATE_RESTARTING,
148 SKD_DRVR_STATE_RESUMING,
149 SKD_DRVR_STATE_STOPPING,
150 SKD_DRVR_STATE_FAULT,
151 SKD_DRVR_STATE_DISAPPEARED,
152 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
153 SKD_DRVR_STATE_BUSY_ERASE,
154 SKD_DRVR_STATE_BUSY_SANITIZE,
155 SKD_DRVR_STATE_BUSY_IMMINENT,
156 SKD_DRVR_STATE_WAIT_BOOT,
157 SKD_DRVR_STATE_SYNCING,
160 #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
161 #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
162 #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
163 #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
164 #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
165 #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
166 #define SKD_START_WAIT_SECONDS 90u
172 SKD_REQ_STATE_COMPLETED,
173 SKD_REQ_STATE_TIMEOUT,
174 SKD_REQ_STATE_ABORTED,
177 enum skd_fit_msg_state {
182 enum skd_check_status_action {
183 SKD_CHECK_STATUS_REPORT_GOOD,
184 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
185 SKD_CHECK_STATUS_REQUEUE_REQUEST,
186 SKD_CHECK_STATUS_REPORT_ERROR,
187 SKD_CHECK_STATUS_BUSY_IMMINENT,
190 struct skd_fitmsg_context {
191 enum skd_fit_msg_state state;
193 struct skd_fitmsg_context *next;
202 dma_addr_t mb_dma_address;
205 struct skd_request_context {
206 enum skd_req_state state;
208 struct skd_request_context *next;
219 struct scatterlist *sg;
223 struct fit_sg_descriptor *sksg_list;
224 dma_addr_t sksg_dma_address;
226 struct fit_completion_entry_v1 completion;
228 struct fit_comp_error_info err_info;
231 #define SKD_DATA_DIR_HOST_TO_CARD 1
232 #define SKD_DATA_DIR_CARD_TO_HOST 2
233 #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
235 struct skd_special_context {
236 struct skd_request_context req;
241 dma_addr_t db_dma_address;
244 dma_addr_t mb_dma_address;
257 struct sg_iovec *iov;
258 struct sg_iovec no_iov_iov;
260 struct skd_special_context *skspcl;
263 typedef enum skd_irq_type {
269 #define SKD_MAX_BARS 2
272 volatile void __iomem *mem_map[SKD_MAX_BARS];
273 resource_size_t mem_phys[SKD_MAX_BARS];
274 u32 mem_size[SKD_MAX_BARS];
276 skd_irq_type_t irq_type;
278 struct skd_msix_entry *msix_entries;
280 struct pci_dev *pdev;
281 int pcie_error_reporting_is_enabled;
284 struct gendisk *disk;
285 struct request_queue *queue;
286 struct device *class_dev;
290 atomic_t device_count;
296 enum skd_drvr_state state;
300 u32 cur_max_queue_depth;
301 u32 queue_low_water_mark;
302 u32 dev_max_queue_depth;
304 u32 num_fitmsg_context;
307 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
309 struct skd_fitmsg_context *skmsg_free_list;
310 struct skd_fitmsg_context *skmsg_table;
312 struct skd_request_context *skreq_free_list;
313 struct skd_request_context *skreq_table;
315 struct skd_special_context *skspcl_free_list;
316 struct skd_special_context *skspcl_table;
318 struct skd_special_context internal_skspcl;
319 u32 read_cap_blocksize;
320 u32 read_cap_last_lba;
321 int read_cap_is_valid;
322 int inquiry_is_valid;
323 u8 inq_serial_num[13]; /*12 chars plus null term */
324 u8 id_str[80]; /* holds a composite name (pci + sernum) */
328 struct fit_completion_entry_v1 *skcomp_table;
329 struct fit_comp_error_info *skerr_table;
330 dma_addr_t cq_dma_address;
332 wait_queue_head_t waitq;
334 struct timer_list timer;
345 u32 connect_time_stamp;
347 #define SKD_MAX_CONNECT_RETRIES 16
353 struct work_struct completion_worker;
356 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
357 #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
358 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
360 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
364 if (likely(skdev->dbg_level < 2))
365 return readl(skdev->mem_map[1] + offset);
368 val = readl(skdev->mem_map[1] + offset);
370 pr_debug("%s:%s:%d offset %x = %x\n",
371 skdev->name, __func__, __LINE__, offset, val);
377 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
380 if (likely(skdev->dbg_level < 2)) {
381 writel(val, skdev->mem_map[1] + offset);
385 writel(val, skdev->mem_map[1] + offset);
387 pr_debug("%s:%s:%d offset %x = %x\n",
388 skdev->name, __func__, __LINE__, offset, val);
392 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
395 if (likely(skdev->dbg_level < 2)) {
396 writeq(val, skdev->mem_map[1] + offset);
400 writeq(val, skdev->mem_map[1] + offset);
402 pr_debug("%s:%s:%d offset %x = %016llx\n",
403 skdev->name, __func__, __LINE__, offset, val);
408 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
409 static int skd_isr_type = SKD_IRQ_DEFAULT;
411 module_param(skd_isr_type, int, 0444);
412 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
413 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
415 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
416 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
418 module_param(skd_max_req_per_msg, int, 0444);
419 MODULE_PARM_DESC(skd_max_req_per_msg,
420 "Maximum SCSI requests packed in a single message."
421 " (1-14, default==1)");
423 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
424 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
425 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
427 module_param(skd_max_queue_depth, int, 0444);
428 MODULE_PARM_DESC(skd_max_queue_depth,
429 "Maximum SCSI requests issued to s1120."
430 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
432 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
433 module_param(skd_sgs_per_request, int, 0444);
434 MODULE_PARM_DESC(skd_sgs_per_request,
435 "Maximum SG elements per block request."
436 " (1-4096, default==256)");
438 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
439 module_param(skd_max_pass_thru, int, 0444);
440 MODULE_PARM_DESC(skd_max_pass_thru,
441 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
443 module_param(skd_dbg_level, int, 0444);
444 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
446 module_param(skd_isr_comp_limit, int, 0444);
447 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
449 /* Major device number dynamically assigned. */
450 static u32 skd_major;
452 static void skd_destruct(struct skd_device *skdev);
453 static const struct block_device_operations skd_blockdev_ops;
454 static void skd_send_fitmsg(struct skd_device *skdev,
455 struct skd_fitmsg_context *skmsg);
456 static void skd_send_special_fitmsg(struct skd_device *skdev,
457 struct skd_special_context *skspcl);
458 static void skd_request_fn(struct request_queue *rq);
459 static void skd_end_request(struct skd_device *skdev,
460 struct skd_request_context *skreq, int error);
461 static int skd_preop_sg_list(struct skd_device *skdev,
462 struct skd_request_context *skreq);
463 static void skd_postop_sg_list(struct skd_device *skdev,
464 struct skd_request_context *skreq);
466 static void skd_restart_device(struct skd_device *skdev);
467 static int skd_quiesce_dev(struct skd_device *skdev);
468 static int skd_unquiesce_dev(struct skd_device *skdev);
469 static void skd_release_special(struct skd_device *skdev,
470 struct skd_special_context *skspcl);
471 static void skd_disable_interrupts(struct skd_device *skdev);
472 static void skd_isr_fwstate(struct skd_device *skdev);
473 static void skd_recover_requests(struct skd_device *skdev, int requeue);
474 static void skd_soft_reset(struct skd_device *skdev);
476 static const char *skd_name(struct skd_device *skdev);
477 const char *skd_drive_state_to_str(int state);
478 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
479 static void skd_log_skdev(struct skd_device *skdev, const char *event);
480 static void skd_log_skmsg(struct skd_device *skdev,
481 struct skd_fitmsg_context *skmsg, const char *event);
482 static void skd_log_skreq(struct skd_device *skdev,
483 struct skd_request_context *skreq, const char *event);
486 *****************************************************************************
487 * READ/WRITE REQUESTS
488 *****************************************************************************
490 static void skd_fail_all_pending(struct skd_device *skdev)
492 struct request_queue *q = skdev->queue;
496 req = blk_peek_request(q);
499 blk_start_request(req);
500 __blk_end_request_all(req, -EIO);
505 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
506 int data_dir, unsigned lba,
509 if (data_dir == READ)
510 scsi_req->cdb[0] = 0x28;
512 scsi_req->cdb[0] = 0x2a;
514 scsi_req->cdb[1] = 0;
515 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
516 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
517 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
518 scsi_req->cdb[5] = (lba & 0xff);
519 scsi_req->cdb[6] = 0;
520 scsi_req->cdb[7] = (count & 0xff00) >> 8;
521 scsi_req->cdb[8] = count & 0xff;
522 scsi_req->cdb[9] = 0;
526 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
527 struct skd_request_context *skreq)
529 skreq->flush_cmd = 1;
531 scsi_req->cdb[0] = 0x35;
532 scsi_req->cdb[1] = 0;
533 scsi_req->cdb[2] = 0;
534 scsi_req->cdb[3] = 0;
535 scsi_req->cdb[4] = 0;
536 scsi_req->cdb[5] = 0;
537 scsi_req->cdb[6] = 0;
538 scsi_req->cdb[7] = 0;
539 scsi_req->cdb[8] = 0;
540 scsi_req->cdb[9] = 0;
544 skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
545 struct skd_request_context *skreq,
553 buf = page_address(page);
554 len = SKD_DISCARD_CDB_LENGTH;
556 scsi_req->cdb[0] = UNMAP;
557 scsi_req->cdb[8] = len;
559 put_unaligned_be16(6 + 16, &buf[0]);
560 put_unaligned_be16(16, &buf[2]);
561 put_unaligned_be64(lba, &buf[8]);
562 put_unaligned_be32(count, &buf[16]);
565 blk_add_request_payload(req, page, len);
568 static void skd_request_fn_not_online(struct request_queue *q);
570 static void skd_request_fn(struct request_queue *q)
572 struct skd_device *skdev = q->queuedata;
573 struct skd_fitmsg_context *skmsg = NULL;
574 struct fit_msg_hdr *fmh = NULL;
575 struct skd_request_context *skreq;
576 struct request *req = NULL;
577 struct skd_scsi_request *scsi_req;
579 unsigned long io_flags;
592 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
593 skd_request_fn_not_online(q);
597 if (blk_queue_stopped(skdev->queue)) {
598 if (skdev->skmsg_free_list == NULL ||
599 skdev->skreq_free_list == NULL ||
600 skdev->in_flight >= skdev->queue_low_water_mark)
601 /* There is still some kind of shortage */
604 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
609 * - There are no more native requests
610 * - There are already the maximum number of requests in progress
611 * - There are no more skd_request_context entries
612 * - There are no more FIT msg buffers
618 req = blk_peek_request(q);
620 /* Are there any native requests to start? */
624 lba = (u32)blk_rq_pos(req);
625 count = blk_rq_sectors(req);
626 data_dir = rq_data_dir(req);
627 io_flags = req->cmd_flags;
629 if (io_flags & REQ_FLUSH)
632 if (io_flags & REQ_FUA)
635 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
636 "count=%u(0x%x) dir=%d\n",
637 skdev->name, __func__, __LINE__,
638 req, lba, lba, count, count, data_dir);
640 /* At this point we know there is a request */
642 /* Are too many requets already in progress? */
643 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
644 pr_debug("%s:%s:%d qdepth %d, limit %d\n",
645 skdev->name, __func__, __LINE__,
646 skdev->in_flight, skdev->cur_max_queue_depth);
650 /* Is a skd_request_context available? */
651 skreq = skdev->skreq_free_list;
653 pr_debug("%s:%s:%d Out of req=%p\n",
654 skdev->name, __func__, __LINE__, q);
657 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
658 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
660 /* Now we check to see if we can get a fit msg */
662 if (skdev->skmsg_free_list == NULL) {
663 pr_debug("%s:%s:%d Out of msg\n",
664 skdev->name, __func__, __LINE__);
669 skreq->flush_cmd = 0;
671 skreq->sg_byte_count = 0;
672 skreq->discard_page = 0;
675 * OK to now dequeue request from q.
677 * At this point we are comitted to either start or reject
678 * the native request. Note that skd_request_context is
679 * available but is still at the head of the free list.
681 blk_start_request(req);
683 skreq->fitmsg_id = 0;
685 /* Either a FIT msg is in progress or we have to start one. */
687 /* Are there any FIT msg buffers available? */
688 skmsg = skdev->skmsg_free_list;
690 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
691 skdev->name, __func__, __LINE__,
695 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
696 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
698 skdev->skmsg_free_list = skmsg->next;
700 skmsg->state = SKD_MSG_STATE_BUSY;
701 skmsg->id += SKD_ID_INCR;
703 /* Initialize the FIT msg header */
704 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
705 memset(fmh, 0, sizeof(*fmh));
706 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
707 skmsg->length = sizeof(*fmh);
710 skreq->fitmsg_id = skmsg->id;
713 * Note that a FIT msg may have just been started
714 * but contains no SoFIT requests yet.
718 * Transcode the request, checking as we go. The outcome of
719 * the transcoding is represented by the error variable.
721 cmd_ptr = &skmsg->msg_buf[skmsg->length];
722 memset(cmd_ptr, 0, 32);
724 be_lba = cpu_to_be32(lba);
725 be_count = cpu_to_be32(count);
726 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
727 cmdctxt = skreq->id + SKD_ID_INCR;
730 scsi_req->hdr.tag = cmdctxt;
731 scsi_req->hdr.sg_list_dma_address = be_dmaa;
733 if (data_dir == READ)
734 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
736 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
738 if (io_flags & REQ_DISCARD) {
739 page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
741 pr_err("request_fn:Page allocation failed.\n");
742 skd_end_request(skdev, skreq, -ENOMEM);
745 skreq->discard_page = 1;
746 req->completion_data = page;
747 skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
749 } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
750 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
751 SKD_ASSERT(skreq->flush_cmd == 1);
754 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
758 scsi_req->cdb[1] |= SKD_FUA_NV;
763 error = skd_preop_sg_list(skdev, skreq);
767 * Complete the native request with error.
768 * Note that the request context is still at the
769 * head of the free list, and that the SoFIT request
770 * was encoded into the FIT msg buffer but the FIT
771 * msg length has not been updated. In short, the
772 * only resource that has been allocated but might
773 * not be used is that the FIT msg could be empty.
775 pr_debug("%s:%s:%d error Out\n",
776 skdev->name, __func__, __LINE__);
777 skd_end_request(skdev, skreq, error);
782 scsi_req->hdr.sg_list_len_bytes =
783 cpu_to_be32(skreq->sg_byte_count);
785 /* Complete resource allocations. */
786 skdev->skreq_free_list = skreq->next;
787 skreq->state = SKD_REQ_STATE_BUSY;
788 skreq->id += SKD_ID_INCR;
790 skmsg->length += sizeof(struct skd_scsi_request);
791 fmh->num_protocol_cmds_coalesced++;
794 * Update the active request counts.
795 * Capture the timeout timestamp.
797 skreq->timeout_stamp = skdev->timeout_stamp;
798 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
799 skdev->timeout_slot[timo_slot]++;
801 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
802 skdev->name, __func__, __LINE__,
803 skreq->id, skdev->in_flight);
806 * If the FIT msg buffer is full send it.
808 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
809 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
810 skd_send_fitmsg(skdev, skmsg);
817 * Is a FIT msg in progress? If it is empty put the buffer back
818 * on the free list. If it is non-empty send what we got.
819 * This minimizes latency when there are fewer requests than
820 * what fits in a FIT msg.
823 /* Bigger than just a FIT msg header? */
824 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
825 pr_debug("%s:%s:%d sending msg=%p, len %d\n",
826 skdev->name, __func__, __LINE__,
827 skmsg, skmsg->length);
828 skd_send_fitmsg(skdev, skmsg);
831 * The FIT msg is empty. It means we got started
832 * on the msg, but the requests were rejected.
834 skmsg->state = SKD_MSG_STATE_IDLE;
835 skmsg->id += SKD_ID_INCR;
836 skmsg->next = skdev->skmsg_free_list;
837 skdev->skmsg_free_list = skmsg;
844 * If req is non-NULL it means there is something to do but
845 * we are out of a resource.
848 blk_stop_queue(skdev->queue);
851 static void skd_end_request(struct skd_device *skdev,
852 struct skd_request_context *skreq, int error)
854 struct request *req = skreq->req;
855 unsigned int io_flags = req->cmd_flags;
857 if ((io_flags & REQ_DISCARD) &&
858 (skreq->discard_page == 1)) {
859 pr_debug("%s:%s:%d, free the page!",
860 skdev->name, __func__, __LINE__);
861 __free_page(req->completion_data);
864 if (unlikely(error)) {
865 struct request *req = skreq->req;
866 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
867 u32 lba = (u32)blk_rq_pos(req);
868 u32 count = blk_rq_sectors(req);
870 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
871 skd_name(skdev), cmd, lba, count, skreq->id);
873 pr_debug("%s:%s:%d id=0x%x error=%d\n",
874 skdev->name, __func__, __LINE__, skreq->id, error);
876 __blk_end_request_all(skreq->req, error);
879 static int skd_preop_sg_list(struct skd_device *skdev,
880 struct skd_request_context *skreq)
882 struct request *req = skreq->req;
883 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
884 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
885 struct scatterlist *sg = &skreq->sg[0];
889 skreq->sg_byte_count = 0;
891 /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
892 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
894 n_sg = blk_rq_map_sg(skdev->queue, req, sg);
899 * Map scatterlist to PCI bus addresses.
900 * Note PCI might change the number of entries.
902 n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
906 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
910 for (i = 0; i < n_sg; i++) {
911 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
912 u32 cnt = sg_dma_len(&sg[i]);
913 uint64_t dma_addr = sg_dma_address(&sg[i]);
915 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
916 sgd->byte_count = cnt;
917 skreq->sg_byte_count += cnt;
918 sgd->host_side_addr = dma_addr;
919 sgd->dev_side_addr = 0;
922 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
923 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
925 if (unlikely(skdev->dbg_level > 1)) {
926 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
927 skdev->name, __func__, __LINE__,
928 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
929 for (i = 0; i < n_sg; i++) {
930 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
931 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
932 "addr=0x%llx next=0x%llx\n",
933 skdev->name, __func__, __LINE__,
934 i, sgd->byte_count, sgd->control,
935 sgd->host_side_addr, sgd->next_desc_ptr);
942 static void skd_postop_sg_list(struct skd_device *skdev,
943 struct skd_request_context *skreq)
945 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
946 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
949 * restore the next ptr for next IO request so we
950 * don't have to set it every time.
952 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
953 skreq->sksg_dma_address +
954 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
955 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
958 static void skd_request_fn_not_online(struct request_queue *q)
960 struct skd_device *skdev = q->queuedata;
963 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
965 skd_log_skdev(skdev, "req_not_online");
966 switch (skdev->state) {
967 case SKD_DRVR_STATE_PAUSING:
968 case SKD_DRVR_STATE_PAUSED:
969 case SKD_DRVR_STATE_STARTING:
970 case SKD_DRVR_STATE_RESTARTING:
971 case SKD_DRVR_STATE_WAIT_BOOT:
972 /* In case of starting, we haven't started the queue,
973 * so we can't get here... but requests are
974 * possibly hanging out waiting for us because we
975 * reported the dev/skd0 already. They'll wait
976 * forever if connect doesn't complete.
977 * What to do??? delay dev/skd0 ??
979 case SKD_DRVR_STATE_BUSY:
980 case SKD_DRVR_STATE_BUSY_IMMINENT:
981 case SKD_DRVR_STATE_BUSY_ERASE:
982 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
985 case SKD_DRVR_STATE_BUSY_SANITIZE:
986 case SKD_DRVR_STATE_STOPPING:
987 case SKD_DRVR_STATE_SYNCING:
988 case SKD_DRVR_STATE_FAULT:
989 case SKD_DRVR_STATE_DISAPPEARED:
995 /* If we get here, terminate all pending block requeusts
996 * with EIO and any scsi pass thru with appropriate sense
999 skd_fail_all_pending(skdev);
1003 *****************************************************************************
1005 *****************************************************************************
1008 static void skd_timer_tick_not_online(struct skd_device *skdev);
1010 static void skd_timer_tick(ulong arg)
1012 struct skd_device *skdev = (struct skd_device *)arg;
1015 u32 overdue_timestamp;
1016 unsigned long reqflags;
1019 if (skdev->state == SKD_DRVR_STATE_FAULT)
1020 /* The driver has declared fault, and we want it to
1021 * stay that way until driver is reloaded.
1025 spin_lock_irqsave(&skdev->lock, reqflags);
1027 state = SKD_READL(skdev, FIT_STATUS);
1028 state &= FIT_SR_DRIVE_STATE_MASK;
1029 if (state != skdev->drive_state)
1030 skd_isr_fwstate(skdev);
1032 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1033 skd_timer_tick_not_online(skdev);
1034 goto timer_func_out;
1036 skdev->timeout_stamp++;
1037 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1040 * All requests that happened during the previous use of
1041 * this slot should be done by now. The previous use was
1042 * over 7 seconds ago.
1044 if (skdev->timeout_slot[timo_slot] == 0)
1045 goto timer_func_out;
1047 /* Something is overdue */
1048 overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
1050 pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
1051 skdev->name, __func__, __LINE__,
1052 skdev->timeout_slot[timo_slot], skdev->in_flight);
1053 pr_err("(%s): Overdue IOs (%d), busy %d\n",
1054 skd_name(skdev), skdev->timeout_slot[timo_slot],
1057 skdev->timer_countdown = SKD_DRAINING_TIMO;
1058 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1059 skdev->timo_slot = timo_slot;
1060 blk_stop_queue(skdev->queue);
1063 mod_timer(&skdev->timer, (jiffies + HZ));
1065 spin_unlock_irqrestore(&skdev->lock, reqflags);
1068 static void skd_timer_tick_not_online(struct skd_device *skdev)
1070 switch (skdev->state) {
1071 case SKD_DRVR_STATE_IDLE:
1072 case SKD_DRVR_STATE_LOAD:
1074 case SKD_DRVR_STATE_BUSY_SANITIZE:
1075 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1076 skdev->name, __func__, __LINE__,
1077 skdev->drive_state, skdev->state);
1078 /* If we've been in sanitize for 3 seconds, we figure we're not
1079 * going to get anymore completions, so recover requests now
1081 if (skdev->timer_countdown > 0) {
1082 skdev->timer_countdown--;
1085 skd_recover_requests(skdev, 0);
1088 case SKD_DRVR_STATE_BUSY:
1089 case SKD_DRVR_STATE_BUSY_IMMINENT:
1090 case SKD_DRVR_STATE_BUSY_ERASE:
1091 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1092 skdev->name, __func__, __LINE__,
1093 skdev->state, skdev->timer_countdown);
1094 if (skdev->timer_countdown > 0) {
1095 skdev->timer_countdown--;
1098 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1099 skdev->name, __func__, __LINE__,
1100 skdev->state, skdev->timer_countdown);
1101 skd_restart_device(skdev);
1104 case SKD_DRVR_STATE_WAIT_BOOT:
1105 case SKD_DRVR_STATE_STARTING:
1106 if (skdev->timer_countdown > 0) {
1107 skdev->timer_countdown--;
1110 /* For now, we fault the drive. Could attempt resets to
1111 * revcover at some point. */
1112 skdev->state = SKD_DRVR_STATE_FAULT;
1114 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1115 skd_name(skdev), skdev->drive_state);
1117 /*start the queue so we can respond with error to requests */
1118 /* wakeup anyone waiting for startup complete */
1119 blk_start_queue(skdev->queue);
1120 skdev->gendisk_on = -1;
1121 wake_up_interruptible(&skdev->waitq);
1124 case SKD_DRVR_STATE_ONLINE:
1125 /* shouldn't get here. */
1128 case SKD_DRVR_STATE_PAUSING:
1129 case SKD_DRVR_STATE_PAUSED:
1132 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1133 pr_debug("%s:%s:%d "
1134 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1135 skdev->name, __func__, __LINE__,
1137 skdev->timer_countdown,
1139 skdev->timeout_slot[skdev->timo_slot]);
1140 /* if the slot has cleared we can let the I/O continue */
1141 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1142 pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1143 skdev->name, __func__, __LINE__);
1144 skdev->state = SKD_DRVR_STATE_ONLINE;
1145 blk_start_queue(skdev->queue);
1148 if (skdev->timer_countdown > 0) {
1149 skdev->timer_countdown--;
1152 skd_restart_device(skdev);
1155 case SKD_DRVR_STATE_RESTARTING:
1156 if (skdev->timer_countdown > 0) {
1157 skdev->timer_countdown--;
1160 /* For now, we fault the drive. Could attempt resets to
1161 * revcover at some point. */
1162 skdev->state = SKD_DRVR_STATE_FAULT;
1163 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1164 skd_name(skdev), skdev->drive_state);
1167 * Recovering does two things:
1168 * 1. completes IO with error
1169 * 2. reclaims dma resources
1170 * When is it safe to recover requests?
1171 * - if the drive state is faulted
1172 * - if the state is still soft reset after out timeout
1173 * - if the drive registers are dead (state = FF)
1174 * If it is "unsafe", we still need to recover, so we will
1175 * disable pci bus mastering and disable our interrupts.
1178 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1179 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1180 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1181 /* It never came out of soft reset. Try to
1182 * recover the requests and then let them
1183 * fail. This is to mitigate hung processes. */
1184 skd_recover_requests(skdev, 0);
1186 pr_err("(%s): Disable BusMaster (%x)\n",
1187 skd_name(skdev), skdev->drive_state);
1188 pci_disable_device(skdev->pdev);
1189 skd_disable_interrupts(skdev);
1190 skd_recover_requests(skdev, 0);
1193 /*start the queue so we can respond with error to requests */
1194 /* wakeup anyone waiting for startup complete */
1195 blk_start_queue(skdev->queue);
1196 skdev->gendisk_on = -1;
1197 wake_up_interruptible(&skdev->waitq);
1200 case SKD_DRVR_STATE_RESUMING:
1201 case SKD_DRVR_STATE_STOPPING:
1202 case SKD_DRVR_STATE_SYNCING:
1203 case SKD_DRVR_STATE_FAULT:
1204 case SKD_DRVR_STATE_DISAPPEARED:
1210 static int skd_start_timer(struct skd_device *skdev)
1214 init_timer(&skdev->timer);
1215 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1217 rc = mod_timer(&skdev->timer, (jiffies + HZ));
1219 pr_err("%s: failed to start timer %d\n",
1224 static void skd_kill_timer(struct skd_device *skdev)
1226 del_timer_sync(&skdev->timer);
1230 *****************************************************************************
1232 *****************************************************************************
1234 static int skd_ioctl_sg_io(struct skd_device *skdev,
1235 fmode_t mode, void __user *argp);
1236 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1237 struct skd_sg_io *sksgio);
1238 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1239 struct skd_sg_io *sksgio);
1240 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1241 struct skd_sg_io *sksgio);
1242 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1243 struct skd_sg_io *sksgio, int dxfer_dir);
1244 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1245 struct skd_sg_io *sksgio);
1246 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1247 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1248 struct skd_sg_io *sksgio);
1249 static int skd_sg_io_put_status(struct skd_device *skdev,
1250 struct skd_sg_io *sksgio);
1252 static void skd_complete_special(struct skd_device *skdev,
1253 volatile struct fit_completion_entry_v1
1255 volatile struct fit_comp_error_info *skerr,
1256 struct skd_special_context *skspcl);
1258 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1259 uint cmd_in, ulong arg)
1262 struct gendisk *disk = bdev->bd_disk;
1263 struct skd_device *skdev = disk->private_data;
1264 void __user *p = (void *)arg;
1266 pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
1267 skdev->name, __func__, __LINE__,
1268 disk->disk_name, current->comm, mode, cmd_in, arg);
1270 if (!capable(CAP_SYS_ADMIN))
1274 case SG_SET_TIMEOUT:
1275 case SG_GET_TIMEOUT:
1276 case SG_GET_VERSION_NUM:
1277 rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1280 rc = skd_ioctl_sg_io(skdev, mode, p);
1288 pr_debug("%s:%s:%d %s: completion rc %d\n",
1289 skdev->name, __func__, __LINE__, disk->disk_name, rc);
1293 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1297 struct skd_sg_io sksgio;
1299 memset(&sksgio, 0, sizeof(sksgio));
1302 sksgio.iov = &sksgio.no_iov_iov;
1304 switch (skdev->state) {
1305 case SKD_DRVR_STATE_ONLINE:
1306 case SKD_DRVR_STATE_BUSY_IMMINENT:
1310 pr_debug("%s:%s:%d drive not online\n",
1311 skdev->name, __func__, __LINE__);
1316 rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1320 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1324 rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1328 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1332 rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1336 rc = skd_sg_io_await(skdev, &sksgio);
1340 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1344 rc = skd_sg_io_put_status(skdev, &sksgio);
1351 skd_sg_io_release_skspcl(skdev, &sksgio);
1353 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1358 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1359 struct skd_sg_io *sksgio)
1361 struct sg_io_hdr *sgp = &sksgio->sg;
1364 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1365 pr_debug("%s:%s:%d access sg failed %p\n",
1366 skdev->name, __func__, __LINE__, sksgio->argp);
1370 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1371 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1372 skdev->name, __func__, __LINE__, sksgio->argp);
1376 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1377 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1378 skdev->name, __func__, __LINE__, sgp->interface_id);
1382 if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1383 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1384 skdev->name, __func__, __LINE__, sgp->cmd_len);
1388 if (sgp->iovec_count > 256) {
1389 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1390 skdev->name, __func__, __LINE__, sgp->iovec_count);
1394 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1395 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1396 skdev->name, __func__, __LINE__, sgp->dxfer_len);
1400 switch (sgp->dxfer_direction) {
1405 case SG_DXFER_TO_DEV:
1409 case SG_DXFER_FROM_DEV:
1410 case SG_DXFER_TO_FROM_DEV:
1415 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1416 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1420 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1421 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1422 skdev->name, __func__, __LINE__, sgp->cmdp);
1426 if (sgp->mx_sb_len != 0) {
1427 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1428 pr_debug("%s:%s:%d access sbp failed %p\n",
1429 skdev->name, __func__, __LINE__, sgp->sbp);
1434 if (sgp->iovec_count == 0) {
1435 sksgio->iov[0].iov_base = sgp->dxferp;
1436 sksgio->iov[0].iov_len = sgp->dxfer_len;
1438 sksgio->dxfer_len = sgp->dxfer_len;
1440 struct sg_iovec *iov;
1441 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1442 size_t iov_data_len;
1444 iov = kmalloc(nbytes, GFP_KERNEL);
1446 pr_debug("%s:%s:%d alloc iovec failed %d\n",
1447 skdev->name, __func__, __LINE__,
1452 sksgio->iovcnt = sgp->iovec_count;
1454 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1455 pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1456 skdev->name, __func__, __LINE__, sgp->dxferp);
1461 * Sum up the vecs, making sure they don't overflow
1464 for (i = 0; i < sgp->iovec_count; i++) {
1465 if (iov_data_len + iov[i].iov_len < iov_data_len)
1467 iov_data_len += iov[i].iov_len;
1470 /* SG_IO howto says that the shorter of the two wins */
1471 if (sgp->dxfer_len < iov_data_len) {
1472 sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1475 sksgio->dxfer_len = sgp->dxfer_len;
1477 sksgio->dxfer_len = iov_data_len;
1480 if (sgp->dxfer_direction != SG_DXFER_NONE) {
1481 struct sg_iovec *iov = sksgio->iov;
1482 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1483 if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1484 pr_debug("%s:%s:%d access data failed %p/%d\n",
1485 skdev->name, __func__, __LINE__,
1486 iov->iov_base, (int)iov->iov_len);
1495 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1496 struct skd_sg_io *sksgio)
1498 struct skd_special_context *skspcl = NULL;
1504 spin_lock_irqsave(&skdev->lock, flags);
1505 skspcl = skdev->skspcl_free_list;
1506 if (skspcl != NULL) {
1507 skdev->skspcl_free_list =
1508 (struct skd_special_context *)skspcl->req.next;
1509 skspcl->req.id += SKD_ID_INCR;
1510 skspcl->req.state = SKD_REQ_STATE_SETUP;
1511 skspcl->orphaned = 0;
1512 skspcl->req.n_sg = 0;
1514 spin_unlock_irqrestore(&skdev->lock, flags);
1516 if (skspcl != NULL) {
1521 pr_debug("%s:%s:%d blocking\n",
1522 skdev->name, __func__, __LINE__);
1524 rc = wait_event_interruptible_timeout(
1526 (skdev->skspcl_free_list != NULL),
1527 msecs_to_jiffies(sksgio->sg.timeout));
1529 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1530 skdev->name, __func__, __LINE__, rc);
1540 * If we get here rc > 0 meaning the timeout to
1541 * wait_event_interruptible_timeout() had time left, hence the
1542 * sought event -- non-empty free list -- happened.
1543 * Retry the allocation.
1546 sksgio->skspcl = skspcl;
1551 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1552 struct skd_request_context *skreq,
1555 u32 resid = dxfer_len;
1558 * The DMA engine must have aligned addresses and byte counts.
1560 resid += (-resid) & 3;
1561 skreq->sg_byte_count = resid;
1566 u32 nbytes = PAGE_SIZE;
1567 u32 ix = skreq->n_sg;
1568 struct scatterlist *sg = &skreq->sg[ix];
1569 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1575 page = alloc_page(GFP_KERNEL);
1579 sg_set_page(sg, page, nbytes, 0);
1581 /* TODO: This should be going through a pci_???()
1582 * routine to do proper mapping. */
1583 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1584 sksg->byte_count = nbytes;
1586 sksg->host_side_addr = sg_phys(sg);
1588 sksg->dev_side_addr = 0;
1589 sksg->next_desc_ptr = skreq->sksg_dma_address +
1590 (ix + 1) * sizeof(*sksg);
1596 if (skreq->n_sg > 0) {
1597 u32 ix = skreq->n_sg - 1;
1598 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1600 sksg->control = FIT_SGD_CONTROL_LAST;
1601 sksg->next_desc_ptr = 0;
1604 if (unlikely(skdev->dbg_level > 1)) {
1607 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1608 skdev->name, __func__, __LINE__,
1609 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1610 for (i = 0; i < skreq->n_sg; i++) {
1611 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1613 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
1614 "addr=0x%llx next=0x%llx\n",
1615 skdev->name, __func__, __LINE__,
1616 i, sgd->byte_count, sgd->control,
1617 sgd->host_side_addr, sgd->next_desc_ptr);
1624 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1625 struct skd_sg_io *sksgio)
1627 struct skd_special_context *skspcl = sksgio->skspcl;
1628 struct skd_request_context *skreq = &skspcl->req;
1629 u32 dxfer_len = sksgio->dxfer_len;
1632 rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1634 * Eventually, errors or not, skd_release_special() is called
1635 * to recover allocations including partial allocations.
1640 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1641 struct skd_sg_io *sksgio, int dxfer_dir)
1643 struct skd_special_context *skspcl = sksgio->skspcl;
1645 struct sg_iovec curiov;
1649 u32 resid = sksgio->dxfer_len;
1653 curiov.iov_base = NULL;
1655 if (dxfer_dir != sksgio->sg.dxfer_direction) {
1656 if (dxfer_dir != SG_DXFER_TO_DEV ||
1657 sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1662 u32 nbytes = PAGE_SIZE;
1664 if (curiov.iov_len == 0) {
1665 curiov = sksgio->iov[iov_ix++];
1671 page = sg_page(&skspcl->req.sg[sksg_ix++]);
1672 bufp = page_address(page);
1673 buf_len = PAGE_SIZE;
1676 nbytes = min_t(u32, nbytes, resid);
1677 nbytes = min_t(u32, nbytes, curiov.iov_len);
1678 nbytes = min_t(u32, nbytes, buf_len);
1680 if (dxfer_dir == SG_DXFER_TO_DEV)
1681 rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1683 rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1689 curiov.iov_len -= nbytes;
1690 curiov.iov_base += nbytes;
1697 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1698 struct skd_sg_io *sksgio)
1700 struct skd_special_context *skspcl = sksgio->skspcl;
1701 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1702 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1704 memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1706 /* Initialize the FIT msg header */
1707 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1708 fmh->num_protocol_cmds_coalesced = 1;
1710 /* Initialize the SCSI request */
1711 if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1712 scsi_req->hdr.sg_list_dma_address =
1713 cpu_to_be64(skspcl->req.sksg_dma_address);
1714 scsi_req->hdr.tag = skspcl->req.id;
1715 scsi_req->hdr.sg_list_len_bytes =
1716 cpu_to_be32(skspcl->req.sg_byte_count);
1717 memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1719 skspcl->req.state = SKD_REQ_STATE_BUSY;
1720 skd_send_special_fitmsg(skdev, skspcl);
1725 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1727 unsigned long flags;
1730 rc = wait_event_interruptible_timeout(skdev->waitq,
1731 (sksgio->skspcl->req.state !=
1732 SKD_REQ_STATE_BUSY),
1733 msecs_to_jiffies(sksgio->sg.
1736 spin_lock_irqsave(&skdev->lock, flags);
1738 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1739 pr_debug("%s:%s:%d skspcl %p aborted\n",
1740 skdev->name, __func__, __LINE__, sksgio->skspcl);
1742 /* Build check cond, sense and let command finish. */
1743 /* For a timeout, we must fabricate completion and sense
1744 * data to complete the command */
1745 sksgio->skspcl->req.completion.status =
1746 SAM_STAT_CHECK_CONDITION;
1748 memset(&sksgio->skspcl->req.err_info, 0,
1749 sizeof(sksgio->skspcl->req.err_info));
1750 sksgio->skspcl->req.err_info.type = 0x70;
1751 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1752 sksgio->skspcl->req.err_info.code = 0x44;
1753 sksgio->skspcl->req.err_info.qual = 0;
1755 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1756 /* No longer on the adapter. We finish. */
1759 /* Something's gone wrong. Still busy. Timeout or
1760 * user interrupted (control-C). Mark as an orphan
1761 * so it will be disposed when completed. */
1762 sksgio->skspcl->orphaned = 1;
1763 sksgio->skspcl = NULL;
1765 pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1766 skdev->name, __func__, __LINE__,
1767 sksgio, sksgio->sg.timeout);
1770 pr_debug("%s:%s:%d cntlc %p\n",
1771 skdev->name, __func__, __LINE__, sksgio);
1776 spin_unlock_irqrestore(&skdev->lock, flags);
1781 static int skd_sg_io_put_status(struct skd_device *skdev,
1782 struct skd_sg_io *sksgio)
1784 struct sg_io_hdr *sgp = &sksgio->sg;
1785 struct skd_special_context *skspcl = sksgio->skspcl;
1788 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1790 sgp->status = skspcl->req.completion.status;
1791 resid = sksgio->dxfer_len - nb;
1793 sgp->masked_status = sgp->status & STATUS_MASK;
1794 sgp->msg_status = 0;
1795 sgp->host_status = 0;
1796 sgp->driver_status = 0;
1798 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1799 sgp->info |= SG_INFO_CHECK;
1801 pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1802 skdev->name, __func__, __LINE__,
1803 sgp->status, sgp->masked_status, sgp->resid);
1805 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1806 if (sgp->mx_sb_len > 0) {
1807 struct fit_comp_error_info *ei = &skspcl->req.err_info;
1808 u32 nbytes = sizeof(*ei);
1810 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1812 sgp->sb_len_wr = nbytes;
1814 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1815 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1816 skdev->name, __func__, __LINE__,
1823 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1824 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1825 skdev->name, __func__, __LINE__, sksgio->argp);
1832 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1833 struct skd_sg_io *sksgio)
1835 struct skd_special_context *skspcl = sksgio->skspcl;
1837 if (skspcl != NULL) {
1840 sksgio->skspcl = NULL;
1842 spin_lock_irqsave(&skdev->lock, flags);
1843 skd_release_special(skdev, skspcl);
1844 spin_unlock_irqrestore(&skdev->lock, flags);
1851 *****************************************************************************
1852 * INTERNAL REQUESTS -- generated by driver itself
1853 *****************************************************************************
1856 static int skd_format_internal_skspcl(struct skd_device *skdev)
1858 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1859 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1860 struct fit_msg_hdr *fmh;
1861 uint64_t dma_address;
1862 struct skd_scsi_request *scsi;
1864 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1865 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1866 fmh->num_protocol_cmds_coalesced = 1;
1868 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1869 memset(scsi, 0, sizeof(*scsi));
1870 dma_address = skspcl->req.sksg_dma_address;
1871 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1872 sgd->control = FIT_SGD_CONTROL_LAST;
1873 sgd->byte_count = 0;
1874 sgd->host_side_addr = skspcl->db_dma_address;
1875 sgd->dev_side_addr = 0;
1876 sgd->next_desc_ptr = 0LL;
1881 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1883 static void skd_send_internal_skspcl(struct skd_device *skdev,
1884 struct skd_special_context *skspcl,
1887 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1888 struct skd_scsi_request *scsi;
1889 unsigned char *buf = skspcl->data_buf;
1892 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1894 * A refresh is already in progress.
1895 * Just wait for it to finish.
1899 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1900 skspcl->req.state = SKD_REQ_STATE_BUSY;
1901 skspcl->req.id += SKD_ID_INCR;
1903 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1904 scsi->hdr.tag = skspcl->req.id;
1906 memset(scsi->cdb, 0, sizeof(scsi->cdb));
1909 case TEST_UNIT_READY:
1910 scsi->cdb[0] = TEST_UNIT_READY;
1911 sgd->byte_count = 0;
1912 scsi->hdr.sg_list_len_bytes = 0;
1916 scsi->cdb[0] = READ_CAPACITY;
1917 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1918 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1922 scsi->cdb[0] = INQUIRY;
1923 scsi->cdb[1] = 0x01; /* evpd */
1924 scsi->cdb[2] = 0x80; /* serial number page */
1925 scsi->cdb[4] = 0x10;
1926 sgd->byte_count = 16;
1927 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1930 case SYNCHRONIZE_CACHE:
1931 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1932 sgd->byte_count = 0;
1933 scsi->hdr.sg_list_len_bytes = 0;
1937 scsi->cdb[0] = WRITE_BUFFER;
1938 scsi->cdb[1] = 0x02;
1939 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1940 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1941 sgd->byte_count = WR_BUF_SIZE;
1942 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1943 /* fill incrementing byte pattern */
1944 for (i = 0; i < sgd->byte_count; i++)
1949 scsi->cdb[0] = READ_BUFFER;
1950 scsi->cdb[1] = 0x02;
1951 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1952 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1953 sgd->byte_count = WR_BUF_SIZE;
1954 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1955 memset(skspcl->data_buf, 0, sgd->byte_count);
1959 SKD_ASSERT("Don't know what to send");
1963 skd_send_special_fitmsg(skdev, skspcl);
1966 static void skd_refresh_device_data(struct skd_device *skdev)
1968 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1970 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1973 static int skd_chk_read_buf(struct skd_device *skdev,
1974 struct skd_special_context *skspcl)
1976 unsigned char *buf = skspcl->data_buf;
1979 /* check for incrementing byte pattern */
1980 for (i = 0; i < WR_BUF_SIZE; i++)
1981 if (buf[i] != (i & 0xFF))
1987 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1988 u8 code, u8 qual, u8 fruc)
1990 /* If the check condition is of special interest, log a message */
1991 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1992 && (code == 0x04) && (qual == 0x06)) {
1993 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1994 "ascq/fruc %02x/%02x/%02x/%02x\n",
1995 skd_name(skdev), key, code, qual, fruc);
1999 static void skd_complete_internal(struct skd_device *skdev,
2000 volatile struct fit_completion_entry_v1
2002 volatile struct fit_comp_error_info *skerr,
2003 struct skd_special_context *skspcl)
2005 u8 *buf = skspcl->data_buf;
2008 struct skd_scsi_request *scsi =
2009 (struct skd_scsi_request *)&skspcl->msg_buf[64];
2011 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
2013 pr_debug("%s:%s:%d complete internal %x\n",
2014 skdev->name, __func__, __LINE__, scsi->cdb[0]);
2016 skspcl->req.completion = *skcomp;
2017 skspcl->req.state = SKD_REQ_STATE_IDLE;
2018 skspcl->req.id += SKD_ID_INCR;
2020 status = skspcl->req.completion.status;
2022 skd_log_check_status(skdev, status, skerr->key, skerr->code,
2023 skerr->qual, skerr->fruc);
2025 switch (scsi->cdb[0]) {
2026 case TEST_UNIT_READY:
2027 if (status == SAM_STAT_GOOD)
2028 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2029 else if ((status == SAM_STAT_CHECK_CONDITION) &&
2030 (skerr->key == MEDIUM_ERROR))
2031 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2033 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2034 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
2035 skdev->name, __func__, __LINE__,
2039 pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
2040 skdev->name, __func__, __LINE__);
2041 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2046 if (status == SAM_STAT_GOOD)
2047 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
2049 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2050 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
2051 skdev->name, __func__, __LINE__,
2055 pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2056 skdev->name, __func__, __LINE__);
2057 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2062 if (status == SAM_STAT_GOOD) {
2063 if (skd_chk_read_buf(skdev, skspcl) == 0)
2064 skd_send_internal_skspcl(skdev, skspcl,
2068 "(%s):*** W/R Buffer mismatch %d ***\n",
2069 skd_name(skdev), skdev->connect_retries);
2070 if (skdev->connect_retries <
2071 SKD_MAX_CONNECT_RETRIES) {
2072 skdev->connect_retries++;
2073 skd_soft_reset(skdev);
2076 "(%s): W/R Buffer Connect Error\n",
2083 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2084 pr_debug("%s:%s:%d "
2085 "read buffer failed, don't send anymore state 0x%x\n",
2086 skdev->name, __func__, __LINE__,
2090 pr_debug("%s:%s:%d "
2091 "**** read buffer failed, retry skerr\n",
2092 skdev->name, __func__, __LINE__);
2093 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2098 skdev->read_cap_is_valid = 0;
2099 if (status == SAM_STAT_GOOD) {
2100 skdev->read_cap_last_lba =
2101 (buf[0] << 24) | (buf[1] << 16) |
2102 (buf[2] << 8) | buf[3];
2103 skdev->read_cap_blocksize =
2104 (buf[4] << 24) | (buf[5] << 16) |
2105 (buf[6] << 8) | buf[7];
2107 pr_debug("%s:%s:%d last lba %d, bs %d\n",
2108 skdev->name, __func__, __LINE__,
2109 skdev->read_cap_last_lba,
2110 skdev->read_cap_blocksize);
2112 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2114 skdev->read_cap_is_valid = 1;
2116 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2117 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2118 (skerr->key == MEDIUM_ERROR)) {
2119 skdev->read_cap_last_lba = ~0;
2120 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2121 pr_debug("%s:%s:%d "
2122 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2123 skdev->name, __func__, __LINE__);
2124 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2126 pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2127 skdev->name, __func__, __LINE__);
2128 skd_send_internal_skspcl(skdev, skspcl,
2134 skdev->inquiry_is_valid = 0;
2135 if (status == SAM_STAT_GOOD) {
2136 skdev->inquiry_is_valid = 1;
2138 for (i = 0; i < 12; i++)
2139 skdev->inq_serial_num[i] = buf[i + 4];
2140 skdev->inq_serial_num[12] = 0;
2143 if (skd_unquiesce_dev(skdev) < 0)
2144 pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
2145 skdev->name, __func__, __LINE__);
2146 /* connection is complete */
2147 skdev->connect_retries = 0;
2150 case SYNCHRONIZE_CACHE:
2151 if (status == SAM_STAT_GOOD)
2152 skdev->sync_done = 1;
2154 skdev->sync_done = -1;
2155 wake_up_interruptible(&skdev->waitq);
2159 SKD_ASSERT("we didn't send this");
2164 *****************************************************************************
2166 *****************************************************************************
2169 static void skd_send_fitmsg(struct skd_device *skdev,
2170 struct skd_fitmsg_context *skmsg)
2173 struct fit_msg_hdr *fmh;
2175 pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2176 skdev->name, __func__, __LINE__,
2177 skmsg->mb_dma_address, skdev->in_flight);
2178 pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2179 skdev->name, __func__, __LINE__,
2180 skmsg->msg_buf, skmsg->offset);
2182 qcmd = skmsg->mb_dma_address;
2183 qcmd |= FIT_QCMD_QID_NORMAL;
2185 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2186 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2188 if (unlikely(skdev->dbg_level > 1)) {
2189 u8 *bp = (u8 *)skmsg->msg_buf;
2191 for (i = 0; i < skmsg->length; i += 8) {
2192 pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
2193 "%02x %02x %02x %02x\n",
2194 skdev->name, __func__, __LINE__,
2195 i, bp[i + 0], bp[i + 1], bp[i + 2],
2196 bp[i + 3], bp[i + 4], bp[i + 5],
2197 bp[i + 6], bp[i + 7]);
2203 if (skmsg->length > 256)
2204 qcmd |= FIT_QCMD_MSGSIZE_512;
2205 else if (skmsg->length > 128)
2206 qcmd |= FIT_QCMD_MSGSIZE_256;
2207 else if (skmsg->length > 64)
2208 qcmd |= FIT_QCMD_MSGSIZE_128;
2211 * This makes no sense because the FIT msg header is
2212 * 64 bytes. If the msg is only 64 bytes long it has
2215 qcmd |= FIT_QCMD_MSGSIZE_64;
2217 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2221 static void skd_send_special_fitmsg(struct skd_device *skdev,
2222 struct skd_special_context *skspcl)
2226 if (unlikely(skdev->dbg_level > 1)) {
2227 u8 *bp = (u8 *)skspcl->msg_buf;
2230 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2231 pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
2232 "%02x %02x %02x %02x\n",
2233 skdev->name, __func__, __LINE__, i,
2234 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2235 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2240 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2241 skdev->name, __func__, __LINE__,
2242 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2243 skspcl->req.sksg_dma_address);
2244 for (i = 0; i < skspcl->req.n_sg; i++) {
2245 struct fit_sg_descriptor *sgd =
2246 &skspcl->req.sksg_list[i];
2248 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
2249 "addr=0x%llx next=0x%llx\n",
2250 skdev->name, __func__, __LINE__,
2251 i, sgd->byte_count, sgd->control,
2252 sgd->host_side_addr, sgd->next_desc_ptr);
2257 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2258 * and one 64-byte SSDI command.
2260 qcmd = skspcl->mb_dma_address;
2261 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2263 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2267 *****************************************************************************
2269 *****************************************************************************
2272 static void skd_complete_other(struct skd_device *skdev,
2273 volatile struct fit_completion_entry_v1 *skcomp,
2274 volatile struct fit_comp_error_info *skerr);
2283 enum skd_check_status_action action;
2286 static struct sns_info skd_chkstat_table[] = {
2288 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2289 SKD_CHECK_STATUS_REPORT_GOOD },
2292 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2293 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2294 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2295 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2296 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2297 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2299 /* Retry (with limits) */
2300 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2301 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2302 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2303 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2304 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2305 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2306 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2307 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2309 /* Busy (or about to be) */
2310 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2311 SKD_CHECK_STATUS_BUSY_IMMINENT },
2315 * Look up status and sense data to decide how to handle the error
2317 * mask says which fields must match e.g., mask=0x18 means check
2318 * type and stat, ignore key, asc, ascq.
2321 static enum skd_check_status_action
2322 skd_check_status(struct skd_device *skdev,
2323 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2327 pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2328 skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2331 pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2332 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2333 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2335 /* Does the info match an entry in the good category? */
2336 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2337 for (i = 0; i < n; i++) {
2338 struct sns_info *sns = &skd_chkstat_table[i];
2340 if (sns->mask & 0x10)
2341 if (skerr->type != sns->type)
2344 if (sns->mask & 0x08)
2345 if (cmp_status != sns->stat)
2348 if (sns->mask & 0x04)
2349 if (skerr->key != sns->key)
2352 if (sns->mask & 0x02)
2353 if (skerr->code != sns->asc)
2356 if (sns->mask & 0x01)
2357 if (skerr->qual != sns->ascq)
2360 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2361 pr_err("(%s): SMART Alert: sense key/asc/ascq "
2363 skd_name(skdev), skerr->key,
2364 skerr->code, skerr->qual);
2369 /* No other match, so nonzero status means error,
2370 * zero status means good
2373 pr_debug("%s:%s:%d status check: error\n",
2374 skdev->name, __func__, __LINE__);
2375 return SKD_CHECK_STATUS_REPORT_ERROR;
2378 pr_debug("%s:%s:%d status check good default\n",
2379 skdev->name, __func__, __LINE__);
2380 return SKD_CHECK_STATUS_REPORT_GOOD;
2383 static void skd_resolve_req_exception(struct skd_device *skdev,
2384 struct skd_request_context *skreq)
2386 u8 cmp_status = skreq->completion.status;
2388 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2389 case SKD_CHECK_STATUS_REPORT_GOOD:
2390 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2391 skd_end_request(skdev, skreq, 0);
2394 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2395 skd_log_skreq(skdev, skreq, "retry(busy)");
2396 blk_requeue_request(skdev->queue, skreq->req);
2397 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2398 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2399 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2400 skd_quiesce_dev(skdev);
2403 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2404 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2405 skd_log_skreq(skdev, skreq, "retry");
2406 blk_requeue_request(skdev->queue, skreq->req);
2409 /* fall through to report error */
2411 case SKD_CHECK_STATUS_REPORT_ERROR:
2413 skd_end_request(skdev, skreq, -EIO);
2418 /* assume spinlock is already held */
2419 static void skd_release_skreq(struct skd_device *skdev,
2420 struct skd_request_context *skreq)
2423 struct skd_fitmsg_context *skmsg;
2428 * Reclaim the FIT msg buffer if this is
2429 * the first of the requests it carried to
2430 * be completed. The FIT msg buffer used to
2431 * send this request cannot be reused until
2432 * we are sure the s1120 card has copied
2433 * it to its memory. The FIT msg might have
2434 * contained several requests. As soon as
2435 * any of them are completed we know that
2436 * the entire FIT msg was transferred.
2437 * Only the first completed request will
2438 * match the FIT msg buffer id. The FIT
2439 * msg buffer id is immediately updated.
2440 * When subsequent requests complete the FIT
2441 * msg buffer id won't match, so we know
2442 * quite cheaply that it is already done.
2444 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2445 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2447 skmsg = &skdev->skmsg_table[msg_slot];
2448 if (skmsg->id == skreq->fitmsg_id) {
2449 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2450 SKD_ASSERT(skmsg->outstanding > 0);
2451 skmsg->outstanding--;
2452 if (skmsg->outstanding == 0) {
2453 skmsg->state = SKD_MSG_STATE_IDLE;
2454 skmsg->id += SKD_ID_INCR;
2455 skmsg->next = skdev->skmsg_free_list;
2456 skdev->skmsg_free_list = skmsg;
2461 * Decrease the number of active requests.
2462 * Also decrements the count in the timeout slot.
2464 SKD_ASSERT(skdev->in_flight > 0);
2465 skdev->in_flight -= 1;
2467 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2468 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2469 skdev->timeout_slot[timo_slot] -= 1;
2477 * Reclaim the skd_request_context
2479 skreq->state = SKD_REQ_STATE_IDLE;
2480 skreq->id += SKD_ID_INCR;
2481 skreq->next = skdev->skreq_free_list;
2482 skdev->skreq_free_list = skreq;
2485 #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2487 static void skd_do_inq_page_00(struct skd_device *skdev,
2488 volatile struct fit_completion_entry_v1 *skcomp,
2489 volatile struct fit_comp_error_info *skerr,
2490 uint8_t *cdb, uint8_t *buf)
2492 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2494 /* Caller requested "supported pages". The driver needs to insert
2497 pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2498 skdev->name, __func__, __LINE__);
2500 /* If the device rejected the request because the CDB was
2501 * improperly formed, then just leave.
2503 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2504 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2507 /* Get the amount of space the caller allocated */
2508 max_bytes = (cdb[3] << 8) | cdb[4];
2510 /* Get the number of pages actually returned by the device */
2511 drive_pages = (buf[2] << 8) | buf[3];
2512 drive_bytes = drive_pages + 4;
2513 new_size = drive_pages + 1;
2515 /* Supported pages must be in numerical order, so find where
2516 * the driver page needs to be inserted into the list of
2517 * pages returned by the device.
2519 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2520 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2521 return; /* Device using this page code. abort */
2522 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2526 if (insert_pt < max_bytes) {
2529 /* Shift everything up one byte to make room. */
2530 for (u = new_size + 3; u > insert_pt; u--)
2531 buf[u] = buf[u - 1];
2532 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2534 /* SCSI byte order increment of num_returned_bytes by 1 */
2535 skcomp->num_returned_bytes =
2536 be32_to_cpu(skcomp->num_returned_bytes) + 1;
2537 skcomp->num_returned_bytes =
2538 be32_to_cpu(skcomp->num_returned_bytes);
2541 /* update page length field to reflect the driver's page too */
2542 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2543 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2546 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2552 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2555 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2557 pci_bus_speed = linksta & 0xF;
2558 pci_lanes = (linksta & 0x3F0) >> 4;
2560 *speed = STEC_LINK_UNKNOWN;
2565 switch (pci_bus_speed) {
2567 *speed = STEC_LINK_2_5GTS;
2570 *speed = STEC_LINK_5GTS;
2573 *speed = STEC_LINK_8GTS;
2576 *speed = STEC_LINK_UNKNOWN;
2580 if (pci_lanes <= 0x20)
2586 static void skd_do_inq_page_da(struct skd_device *skdev,
2587 volatile struct fit_completion_entry_v1 *skcomp,
2588 volatile struct fit_comp_error_info *skerr,
2589 uint8_t *cdb, uint8_t *buf)
2591 struct pci_dev *pdev = skdev->pdev;
2593 struct driver_inquiry_data inq;
2596 pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2597 skdev->name, __func__, __LINE__);
2599 memset(&inq, 0, sizeof(inq));
2601 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2603 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2604 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2605 inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2606 inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2608 pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2609 inq.pcie_vendor_id = cpu_to_be16(val);
2611 pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2612 inq.pcie_device_id = cpu_to_be16(val);
2614 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2615 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2617 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2618 inq.pcie_subsystem_device_id = cpu_to_be16(val);
2620 /* Driver version, fixed lenth, padded with spaces on the right */
2621 inq.driver_version_length = sizeof(inq.driver_version);
2622 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2623 memcpy(inq.driver_version, DRV_VER_COMPL,
2624 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2626 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2628 /* Clear the error set by the device */
2629 skcomp->status = SAM_STAT_GOOD;
2630 memset((void *)skerr, 0, sizeof(*skerr));
2632 /* copy response into output buffer */
2633 max_bytes = (cdb[3] << 8) | cdb[4];
2634 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2636 skcomp->num_returned_bytes =
2637 be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
2640 static void skd_do_driver_inq(struct skd_device *skdev,
2641 volatile struct fit_completion_entry_v1 *skcomp,
2642 volatile struct fit_comp_error_info *skerr,
2643 uint8_t *cdb, uint8_t *buf)
2647 else if (cdb[0] != INQUIRY)
2648 return; /* Not an INQUIRY */
2649 else if ((cdb[1] & 1) == 0)
2650 return; /* EVPD not set */
2651 else if (cdb[2] == 0)
2652 /* Need to add driver's page to supported pages list */
2653 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2654 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2655 /* Caller requested driver's page */
2656 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2659 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2668 static void skd_process_scsi_inq(struct skd_device *skdev,
2669 volatile struct fit_completion_entry_v1
2671 volatile struct fit_comp_error_info *skerr,
2672 struct skd_special_context *skspcl)
2675 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2676 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2678 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2679 skspcl->req.sg_data_dir);
2680 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2683 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2687 static int skd_isr_completion_posted(struct skd_device *skdev,
2688 int limit, int *enqueued)
2690 volatile struct fit_completion_entry_v1 *skcmp = NULL;
2691 volatile struct fit_comp_error_info *skerr;
2694 struct skd_request_context *skreq;
2703 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2705 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2706 cmp_cycle = skcmp->cycle;
2707 cmp_cntxt = skcmp->tag;
2708 cmp_status = skcmp->status;
2709 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2711 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2713 pr_debug("%s:%s:%d "
2714 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2715 "busy=%d rbytes=0x%x proto=%d\n",
2716 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2717 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2718 skdev->in_flight, cmp_bytes, skdev->proto_ver);
2720 if (cmp_cycle != skdev->skcomp_cycle) {
2721 pr_debug("%s:%s:%d end of completions\n",
2722 skdev->name, __func__, __LINE__);
2726 * Update the completion queue head index and possibly
2727 * the completion cycle count. 8-bit wrap-around.
2730 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2731 skdev->skcomp_ix = 0;
2732 skdev->skcomp_cycle++;
2736 * The command context is a unique 32-bit ID. The low order
2737 * bits help locate the request. The request is usually a
2738 * r/w request (see skd_start() above) or a special request.
2741 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2743 /* Is this other than a r/w request? */
2744 if (req_slot >= skdev->num_req_context) {
2746 * This is not a completion for a r/w request.
2748 skd_complete_other(skdev, skcmp, skerr);
2752 skreq = &skdev->skreq_table[req_slot];
2755 * Make sure the request ID for the slot matches.
2757 if (skreq->id != req_id) {
2758 pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2759 skdev->name, __func__, __LINE__,
2762 u16 new_id = cmp_cntxt;
2763 pr_err("(%s): Completion mismatch "
2764 "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2765 skd_name(skdev), req_id,
2772 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2774 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2775 pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2776 skdev->name, __func__, __LINE__,
2778 /* a previously timed out command can
2779 * now be cleaned up */
2780 skd_release_skreq(skdev, skreq);
2784 skreq->completion = *skcmp;
2785 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2786 skreq->err_info = *skerr;
2787 skd_log_check_status(skdev, cmp_status, skerr->key,
2788 skerr->code, skerr->qual,
2791 /* Release DMA resources for the request. */
2792 if (skreq->n_sg > 0)
2793 skd_postop_sg_list(skdev, skreq);
2796 pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2797 "req=0x%x req_id=0x%x\n",
2798 skdev->name, __func__, __LINE__,
2799 skreq, skreq->id, req_id);
2802 * Capture the outcome and post it back to the
2805 if (likely(cmp_status == SAM_STAT_GOOD))
2806 skd_end_request(skdev, skreq, 0);
2808 skd_resolve_req_exception(skdev, skreq);
2812 * Release the skreq, its FIT msg (if one), timeout slot,
2815 skd_release_skreq(skdev, skreq);
2817 /* skd_isr_comp_limit equal zero means no limit */
2819 if (++processed >= limit) {
2826 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2827 && (skdev->in_flight) == 0) {
2828 skdev->state = SKD_DRVR_STATE_PAUSED;
2829 wake_up_interruptible(&skdev->waitq);
2835 static void skd_complete_other(struct skd_device *skdev,
2836 volatile struct fit_completion_entry_v1 *skcomp,
2837 volatile struct fit_comp_error_info *skerr)
2842 struct skd_special_context *skspcl;
2844 req_id = skcomp->tag;
2845 req_table = req_id & SKD_ID_TABLE_MASK;
2846 req_slot = req_id & SKD_ID_SLOT_MASK;
2848 pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2849 skdev->name, __func__, __LINE__,
2850 req_table, req_id, req_slot);
2853 * Based on the request id, determine how to dispatch this completion.
2854 * This swich/case is finding the good cases and forwarding the
2855 * completion entry. Errors are reported below the switch.
2857 switch (req_table) {
2858 case SKD_ID_RW_REQUEST:
2860 * The caller, skd_completion_posted_isr() above,
2861 * handles r/w requests. The only way we get here
2862 * is if the req_slot is out of bounds.
2866 case SKD_ID_SPECIAL_REQUEST:
2868 * Make sure the req_slot is in bounds and that the id
2871 if (req_slot < skdev->n_special) {
2872 skspcl = &skdev->skspcl_table[req_slot];
2873 if (skspcl->req.id == req_id &&
2874 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2875 skd_complete_special(skdev,
2876 skcomp, skerr, skspcl);
2882 case SKD_ID_INTERNAL:
2883 if (req_slot == 0) {
2884 skspcl = &skdev->internal_skspcl;
2885 if (skspcl->req.id == req_id &&
2886 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2887 skd_complete_internal(skdev,
2888 skcomp, skerr, skspcl);
2894 case SKD_ID_FIT_MSG:
2896 * These id's should never appear in a completion record.
2902 * These id's should never appear anywhere;
2908 * If we get here it is a bad or stale id.
2912 static void skd_complete_special(struct skd_device *skdev,
2913 volatile struct fit_completion_entry_v1
2915 volatile struct fit_comp_error_info *skerr,
2916 struct skd_special_context *skspcl)
2918 pr_debug("%s:%s:%d completing special request %p\n",
2919 skdev->name, __func__, __LINE__, skspcl);
2920 if (skspcl->orphaned) {
2921 /* Discard orphaned request */
2922 /* ?: Can this release directly or does it need
2923 * to use a worker? */
2924 pr_debug("%s:%s:%d release orphaned %p\n",
2925 skdev->name, __func__, __LINE__, skspcl);
2926 skd_release_special(skdev, skspcl);
2930 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2932 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2933 skspcl->req.completion = *skcomp;
2934 skspcl->req.err_info = *skerr;
2936 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2937 skerr->code, skerr->qual, skerr->fruc);
2939 wake_up_interruptible(&skdev->waitq);
2942 /* assume spinlock is already held */
2943 static void skd_release_special(struct skd_device *skdev,
2944 struct skd_special_context *skspcl)
2946 int i, was_depleted;
2948 for (i = 0; i < skspcl->req.n_sg; i++) {
2949 struct page *page = sg_page(&skspcl->req.sg[i]);
2953 was_depleted = (skdev->skspcl_free_list == NULL);
2955 skspcl->req.state = SKD_REQ_STATE_IDLE;
2956 skspcl->req.id += SKD_ID_INCR;
2958 (struct skd_request_context *)skdev->skspcl_free_list;
2959 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2962 pr_debug("%s:%s:%d skspcl was depleted\n",
2963 skdev->name, __func__, __LINE__);
2964 /* Free list was depleted. Their might be waiters. */
2965 wake_up_interruptible(&skdev->waitq);
2969 static void skd_reset_skcomp(struct skd_device *skdev)
2972 struct fit_completion_entry_v1 *skcomp;
2974 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2975 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2977 memset(skdev->skcomp_table, 0, nbytes);
2979 skdev->skcomp_ix = 0;
2980 skdev->skcomp_cycle = 1;
2984 *****************************************************************************
2986 *****************************************************************************
2988 static void skd_completion_worker(struct work_struct *work)
2990 struct skd_device *skdev =
2991 container_of(work, struct skd_device, completion_worker);
2992 unsigned long flags;
2993 int flush_enqueued = 0;
2995 spin_lock_irqsave(&skdev->lock, flags);
2998 * pass in limit=0, which means no limit..
2999 * process everything in compq
3001 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
3002 skd_request_fn(skdev->queue);
3004 spin_unlock_irqrestore(&skdev->lock, flags);
3007 static void skd_isr_msg_from_dev(struct skd_device *skdev);
3010 static skd_isr(int irq, void *ptr)
3012 struct skd_device *skdev;
3017 int flush_enqueued = 0;
3019 skdev = (struct skd_device *)ptr;
3020 spin_lock(&skdev->lock);
3023 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3025 ack = FIT_INT_DEF_MASK;
3028 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
3029 skdev->name, __func__, __LINE__, intstat, ack);
3031 /* As long as there is an int pending on device, keep
3032 * running loop. When none, get out, but if we've never
3033 * done any processing, call completion handler?
3036 /* No interrupts on device, but run the completion
3040 if (likely (skdev->state
3041 == SKD_DRVR_STATE_ONLINE))
3048 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3050 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3051 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3052 if (intstat & FIT_ISH_COMPLETION_POSTED) {
3054 * If we have already deferred completion
3055 * processing, don't bother running it again
3059 skd_isr_completion_posted(skdev,
3060 skd_isr_comp_limit, &flush_enqueued);
3063 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3064 skd_isr_fwstate(skdev);
3065 if (skdev->state == SKD_DRVR_STATE_FAULT ||
3067 SKD_DRVR_STATE_DISAPPEARED) {
3068 spin_unlock(&skdev->lock);
3073 if (intstat & FIT_ISH_MSG_FROM_DEV)
3074 skd_isr_msg_from_dev(skdev);
3078 if (unlikely(flush_enqueued))
3079 skd_request_fn(skdev->queue);
3082 schedule_work(&skdev->completion_worker);
3083 else if (!flush_enqueued)
3084 skd_request_fn(skdev->queue);
3086 spin_unlock(&skdev->lock);
3091 static void skd_drive_fault(struct skd_device *skdev)
3093 skdev->state = SKD_DRVR_STATE_FAULT;
3094 pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3097 static void skd_drive_disappeared(struct skd_device *skdev)
3099 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3100 pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3103 static void skd_isr_fwstate(struct skd_device *skdev)
3108 int prev_driver_state = skdev->state;
3110 sense = SKD_READL(skdev, FIT_STATUS);
3111 state = sense & FIT_SR_DRIVE_STATE_MASK;
3113 pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3115 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3116 skd_drive_state_to_str(state), state);
3118 skdev->drive_state = state;
3120 switch (skdev->drive_state) {
3121 case FIT_SR_DRIVE_INIT:
3122 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3123 skd_disable_interrupts(skdev);
3126 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3127 skd_recover_requests(skdev, 0);
3128 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3129 skdev->timer_countdown = SKD_STARTING_TIMO;
3130 skdev->state = SKD_DRVR_STATE_STARTING;
3131 skd_soft_reset(skdev);
3134 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3135 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3136 skdev->last_mtd = mtd;
3139 case FIT_SR_DRIVE_ONLINE:
3140 skdev->cur_max_queue_depth = skd_max_queue_depth;
3141 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3142 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3144 skdev->queue_low_water_mark =
3145 skdev->cur_max_queue_depth * 2 / 3 + 1;
3146 if (skdev->queue_low_water_mark < 1)
3147 skdev->queue_low_water_mark = 1;
3149 "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3151 skdev->cur_max_queue_depth,
3152 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3154 skd_refresh_device_data(skdev);
3157 case FIT_SR_DRIVE_BUSY:
3158 skdev->state = SKD_DRVR_STATE_BUSY;
3159 skdev->timer_countdown = SKD_BUSY_TIMO;
3160 skd_quiesce_dev(skdev);
3162 case FIT_SR_DRIVE_BUSY_SANITIZE:
3163 /* set timer for 3 seconds, we'll abort any unfinished
3164 * commands after that expires
3166 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3167 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3168 blk_start_queue(skdev->queue);
3170 case FIT_SR_DRIVE_BUSY_ERASE:
3171 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3172 skdev->timer_countdown = SKD_BUSY_TIMO;
3174 case FIT_SR_DRIVE_OFFLINE:
3175 skdev->state = SKD_DRVR_STATE_IDLE;
3177 case FIT_SR_DRIVE_SOFT_RESET:
3178 switch (skdev->state) {
3179 case SKD_DRVR_STATE_STARTING:
3180 case SKD_DRVR_STATE_RESTARTING:
3181 /* Expected by a caller of skd_soft_reset() */
3184 skdev->state = SKD_DRVR_STATE_RESTARTING;
3188 case FIT_SR_DRIVE_FW_BOOTING:
3189 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3190 skdev->name, __func__, __LINE__, skdev->name);
3191 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3192 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3195 case FIT_SR_DRIVE_DEGRADED:
3196 case FIT_SR_PCIE_LINK_DOWN:
3197 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3200 case FIT_SR_DRIVE_FAULT:
3201 skd_drive_fault(skdev);
3202 skd_recover_requests(skdev, 0);
3203 blk_start_queue(skdev->queue);
3206 /* PCIe bus returned all Fs? */
3208 pr_info("(%s): state=0x%x sense=0x%x\n",
3209 skd_name(skdev), state, sense);
3210 skd_drive_disappeared(skdev);
3211 skd_recover_requests(skdev, 0);
3212 blk_start_queue(skdev->queue);
3216 * Uknown FW State. Wait for a state we recognize.
3220 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3222 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3223 skd_skdev_state_to_str(skdev->state), skdev->state);
3226 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3230 for (i = 0; i < skdev->num_req_context; i++) {
3231 struct skd_request_context *skreq = &skdev->skreq_table[i];
3233 if (skreq->state == SKD_REQ_STATE_BUSY) {
3234 skd_log_skreq(skdev, skreq, "recover");
3236 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3237 SKD_ASSERT(skreq->req != NULL);
3239 /* Release DMA resources for the request. */
3240 if (skreq->n_sg > 0)
3241 skd_postop_sg_list(skdev, skreq);
3244 (unsigned long) ++skreq->req->special <
3246 blk_requeue_request(skdev->queue, skreq->req);
3248 skd_end_request(skdev, skreq, -EIO);
3252 skreq->state = SKD_REQ_STATE_IDLE;
3253 skreq->id += SKD_ID_INCR;
3256 skreq[-1].next = skreq;
3259 skdev->skreq_free_list = skdev->skreq_table;
3261 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3262 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3264 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3265 skd_log_skmsg(skdev, skmsg, "salvaged");
3266 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3267 skmsg->state = SKD_MSG_STATE_IDLE;
3268 skmsg->id += SKD_ID_INCR;
3271 skmsg[-1].next = skmsg;
3274 skdev->skmsg_free_list = skdev->skmsg_table;
3276 for (i = 0; i < skdev->n_special; i++) {
3277 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3279 /* If orphaned, reclaim it because it has already been reported
3280 * to the process as an error (it was just waiting for
3281 * a completion that didn't come, and now it will never come)
3282 * If busy, change to a state that will cause it to error
3283 * out in the wait routine and let it do the normal
3284 * reporting and reclaiming
3286 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3287 if (skspcl->orphaned) {
3288 pr_debug("%s:%s:%d orphaned %p\n",
3289 skdev->name, __func__, __LINE__,
3291 skd_release_special(skdev, skspcl);
3293 pr_debug("%s:%s:%d not orphaned %p\n",
3294 skdev->name, __func__, __LINE__,
3296 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3300 skdev->skspcl_free_list = skdev->skspcl_table;
3302 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3303 skdev->timeout_slot[i] = 0;
3305 skdev->in_flight = 0;
3308 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3314 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3316 pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3317 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3319 /* ignore any mtd that is an ack for something we didn't send */
3320 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3323 switch (FIT_MXD_TYPE(mfd)) {
3324 case FIT_MTD_FITFW_INIT:
3325 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3327 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3328 pr_err("(%s): protocol mismatch\n",
3330 pr_err("(%s): got=%d support=%d\n",
3331 skdev->name, skdev->proto_ver,
3332 FIT_PROTOCOL_VERSION_1);
3333 pr_err("(%s): please upgrade driver\n",
3335 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3336 skd_soft_reset(skdev);
3339 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3340 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3341 skdev->last_mtd = mtd;
3344 case FIT_MTD_GET_CMDQ_DEPTH:
3345 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3346 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3347 SKD_N_COMPLETION_ENTRY);
3348 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3349 skdev->last_mtd = mtd;
3352 case FIT_MTD_SET_COMPQ_DEPTH:
3353 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3354 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3355 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3356 skdev->last_mtd = mtd;
3359 case FIT_MTD_SET_COMPQ_ADDR:
3360 skd_reset_skcomp(skdev);
3361 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3362 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3363 skdev->last_mtd = mtd;
3366 case FIT_MTD_CMD_LOG_HOST_ID:
3367 skdev->connect_time_stamp = get_seconds();
3368 data = skdev->connect_time_stamp & 0xFFFF;
3369 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3370 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3371 skdev->last_mtd = mtd;
3374 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3375 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3376 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3377 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3378 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3379 skdev->last_mtd = mtd;
3382 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3383 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3384 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3385 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3386 skdev->last_mtd = mtd;
3388 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3390 skdev->connect_time_stamp, skdev->drive_jiffies);
3393 case FIT_MTD_ARM_QUEUE:
3394 skdev->last_mtd = 0;
3396 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3405 static void skd_disable_interrupts(struct skd_device *skdev)
3409 sense = SKD_READL(skdev, FIT_CONTROL);
3410 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3411 SKD_WRITEL(skdev, sense, FIT_CONTROL);
3412 pr_debug("%s:%s:%d sense 0x%x\n",
3413 skdev->name, __func__, __LINE__, sense);
3415 /* Note that the 1s is written. A 1-bit means
3416 * disable, a 0 means enable.
3418 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3421 static void skd_enable_interrupts(struct skd_device *skdev)
3425 /* unmask interrupts first */
3426 val = FIT_ISH_FW_STATE_CHANGE +
3427 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3429 /* Note that the compliment of mask is written. A 1-bit means
3430 * disable, a 0 means enable. */
3431 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3432 pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3433 skdev->name, __func__, __LINE__, ~val);
3435 val = SKD_READL(skdev, FIT_CONTROL);
3436 val |= FIT_CR_ENABLE_INTERRUPTS;
3437 pr_debug("%s:%s:%d control=0x%x\n",
3438 skdev->name, __func__, __LINE__, val);
3439 SKD_WRITEL(skdev, val, FIT_CONTROL);
3443 *****************************************************************************
3444 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3445 *****************************************************************************
3448 static void skd_soft_reset(struct skd_device *skdev)
3452 val = SKD_READL(skdev, FIT_CONTROL);
3453 val |= (FIT_CR_SOFT_RESET);
3454 pr_debug("%s:%s:%d control=0x%x\n",
3455 skdev->name, __func__, __LINE__, val);
3456 SKD_WRITEL(skdev, val, FIT_CONTROL);
3459 static void skd_start_device(struct skd_device *skdev)
3461 unsigned long flags;
3465 spin_lock_irqsave(&skdev->lock, flags);
3467 /* ack all ghost interrupts */
3468 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3470 sense = SKD_READL(skdev, FIT_STATUS);
3472 pr_debug("%s:%s:%d initial status=0x%x\n",
3473 skdev->name, __func__, __LINE__, sense);
3475 state = sense & FIT_SR_DRIVE_STATE_MASK;
3476 skdev->drive_state = state;
3477 skdev->last_mtd = 0;
3479 skdev->state = SKD_DRVR_STATE_STARTING;
3480 skdev->timer_countdown = SKD_STARTING_TIMO;
3482 skd_enable_interrupts(skdev);
3484 switch (skdev->drive_state) {
3485 case FIT_SR_DRIVE_OFFLINE:
3486 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3489 case FIT_SR_DRIVE_FW_BOOTING:
3490 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3491 skdev->name, __func__, __LINE__, skdev->name);
3492 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3493 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3496 case FIT_SR_DRIVE_BUSY_SANITIZE:
3497 pr_info("(%s): Start: BUSY_SANITIZE\n",
3499 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3500 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3503 case FIT_SR_DRIVE_BUSY_ERASE:
3504 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3505 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3506 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3509 case FIT_SR_DRIVE_INIT:
3510 case FIT_SR_DRIVE_ONLINE:
3511 skd_soft_reset(skdev);
3514 case FIT_SR_DRIVE_BUSY:
3515 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3516 skdev->state = SKD_DRVR_STATE_BUSY;
3517 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3520 case FIT_SR_DRIVE_SOFT_RESET:
3521 pr_err("(%s) drive soft reset in prog\n",
3525 case FIT_SR_DRIVE_FAULT:
3526 /* Fault state is bad...soft reset won't do it...
3527 * Hard reset, maybe, but does it work on device?
3528 * For now, just fault so the system doesn't hang.
3530 skd_drive_fault(skdev);
3531 /*start the queue so we can respond with error to requests */
3532 pr_debug("%s:%s:%d starting %s queue\n",
3533 skdev->name, __func__, __LINE__, skdev->name);
3534 blk_start_queue(skdev->queue);
3535 skdev->gendisk_on = -1;
3536 wake_up_interruptible(&skdev->waitq);
3540 /* Most likely the device isn't there or isn't responding
3541 * to the BAR1 addresses. */
3542 skd_drive_disappeared(skdev);
3543 /*start the queue so we can respond with error to requests */
3544 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3545 skdev->name, __func__, __LINE__, skdev->name);
3546 blk_start_queue(skdev->queue);
3547 skdev->gendisk_on = -1;
3548 wake_up_interruptible(&skdev->waitq);
3552 pr_err("(%s) Start: unknown state %x\n",
3553 skd_name(skdev), skdev->drive_state);
3557 state = SKD_READL(skdev, FIT_CONTROL);
3558 pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3559 skdev->name, __func__, __LINE__, state);
3561 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3562 pr_debug("%s:%s:%d Intr Status=0x%x\n",
3563 skdev->name, __func__, __LINE__, state);
3565 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3566 pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3567 skdev->name, __func__, __LINE__, state);
3569 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3570 pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3571 skdev->name, __func__, __LINE__, state);
3573 state = SKD_READL(skdev, FIT_HW_VERSION);
3574 pr_debug("%s:%s:%d HW version=0x%x\n",
3575 skdev->name, __func__, __LINE__, state);
3577 spin_unlock_irqrestore(&skdev->lock, flags);
3580 static void skd_stop_device(struct skd_device *skdev)
3582 unsigned long flags;
3583 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3587 spin_lock_irqsave(&skdev->lock, flags);
3589 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3590 pr_err("(%s): skd_stop_device not online no sync\n",
3595 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3596 pr_err("(%s): skd_stop_device no special\n",
3601 skdev->state = SKD_DRVR_STATE_SYNCING;
3602 skdev->sync_done = 0;
3604 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3606 spin_unlock_irqrestore(&skdev->lock, flags);
3608 wait_event_interruptible_timeout(skdev->waitq,
3609 (skdev->sync_done), (10 * HZ));
3611 spin_lock_irqsave(&skdev->lock, flags);
3613 switch (skdev->sync_done) {
3615 pr_err("(%s): skd_stop_device no sync\n",
3619 pr_err("(%s): skd_stop_device sync done\n",
3623 pr_err("(%s): skd_stop_device sync error\n",
3628 skdev->state = SKD_DRVR_STATE_STOPPING;
3629 spin_unlock_irqrestore(&skdev->lock, flags);
3631 skd_kill_timer(skdev);
3633 spin_lock_irqsave(&skdev->lock, flags);
3634 skd_disable_interrupts(skdev);
3636 /* ensure all ints on device are cleared */
3637 /* soft reset the device to unload with a clean slate */
3638 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3639 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3641 spin_unlock_irqrestore(&skdev->lock, flags);
3643 /* poll every 100ms, 1 second timeout */
3644 for (i = 0; i < 10; i++) {
3646 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3647 if (dev_state == FIT_SR_DRIVE_INIT)
3649 set_current_state(TASK_INTERRUPTIBLE);
3650 schedule_timeout(msecs_to_jiffies(100));
3653 if (dev_state != FIT_SR_DRIVE_INIT)
3654 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3655 skd_name(skdev), dev_state);
3658 /* assume spinlock is held */
3659 static void skd_restart_device(struct skd_device *skdev)
3663 /* ack all ghost interrupts */
3664 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3666 state = SKD_READL(skdev, FIT_STATUS);
3668 pr_debug("%s:%s:%d drive status=0x%x\n",
3669 skdev->name, __func__, __LINE__, state);
3671 state &= FIT_SR_DRIVE_STATE_MASK;
3672 skdev->drive_state = state;
3673 skdev->last_mtd = 0;
3675 skdev->state = SKD_DRVR_STATE_RESTARTING;
3676 skdev->timer_countdown = SKD_RESTARTING_TIMO;
3678 skd_soft_reset(skdev);
3681 /* assume spinlock is held */
3682 static int skd_quiesce_dev(struct skd_device *skdev)
3686 switch (skdev->state) {
3687 case SKD_DRVR_STATE_BUSY:
3688 case SKD_DRVR_STATE_BUSY_IMMINENT:
3689 pr_debug("%s:%s:%d stopping %s queue\n",
3690 skdev->name, __func__, __LINE__, skdev->name);
3691 blk_stop_queue(skdev->queue);
3693 case SKD_DRVR_STATE_ONLINE:
3694 case SKD_DRVR_STATE_STOPPING:
3695 case SKD_DRVR_STATE_SYNCING:
3696 case SKD_DRVR_STATE_PAUSING:
3697 case SKD_DRVR_STATE_PAUSED:
3698 case SKD_DRVR_STATE_STARTING:
3699 case SKD_DRVR_STATE_RESTARTING:
3700 case SKD_DRVR_STATE_RESUMING:
3703 pr_debug("%s:%s:%d state [%d] not implemented\n",
3704 skdev->name, __func__, __LINE__, skdev->state);
3709 /* assume spinlock is held */
3710 static int skd_unquiesce_dev(struct skd_device *skdev)
3712 int prev_driver_state = skdev->state;
3714 skd_log_skdev(skdev, "unquiesce");
3715 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3716 pr_debug("%s:%s:%d **** device already ONLINE\n",
3717 skdev->name, __func__, __LINE__);
3720 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3722 * If there has been an state change to other than
3723 * ONLINE, we will rely on controller state change
3724 * to come back online and restart the queue.
3725 * The BUSY state means that driver is ready to
3726 * continue normal processing but waiting for controller
3727 * to become available.
3729 skdev->state = SKD_DRVR_STATE_BUSY;
3730 pr_debug("%s:%s:%d drive BUSY state\n",
3731 skdev->name, __func__, __LINE__);
3736 * Drive has just come online, driver is either in startup,
3737 * paused performing a task, or bust waiting for hardware.
3739 switch (skdev->state) {
3740 case SKD_DRVR_STATE_PAUSED:
3741 case SKD_DRVR_STATE_BUSY:
3742 case SKD_DRVR_STATE_BUSY_IMMINENT:
3743 case SKD_DRVR_STATE_BUSY_ERASE:
3744 case SKD_DRVR_STATE_STARTING:
3745 case SKD_DRVR_STATE_RESTARTING:
3746 case SKD_DRVR_STATE_FAULT:
3747 case SKD_DRVR_STATE_IDLE:
3748 case SKD_DRVR_STATE_LOAD:
3749 skdev->state = SKD_DRVR_STATE_ONLINE;
3750 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3752 skd_skdev_state_to_str(prev_driver_state),
3753 prev_driver_state, skd_skdev_state_to_str(skdev->state),
3755 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3756 skdev->name, __func__, __LINE__);
3757 pr_debug("%s:%s:%d starting %s queue\n",
3758 skdev->name, __func__, __LINE__, skdev->name);
3759 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3760 blk_start_queue(skdev->queue);
3761 skdev->gendisk_on = 1;
3762 wake_up_interruptible(&skdev->waitq);
3765 case SKD_DRVR_STATE_DISAPPEARED:
3767 pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
3768 skdev->name, __func__, __LINE__,
3776 *****************************************************************************
3777 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3778 *****************************************************************************
3781 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3783 struct skd_device *skdev = skd_host_data;
3784 unsigned long flags;
3786 spin_lock_irqsave(&skdev->lock, flags);
3787 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3788 skdev->name, __func__, __LINE__,
3789 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3790 pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3791 irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3792 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3793 spin_unlock_irqrestore(&skdev->lock, flags);
3797 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3799 struct skd_device *skdev = skd_host_data;
3800 unsigned long flags;
3802 spin_lock_irqsave(&skdev->lock, flags);
3803 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3804 skdev->name, __func__, __LINE__,
3805 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3806 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3807 skd_isr_fwstate(skdev);
3808 spin_unlock_irqrestore(&skdev->lock, flags);
3812 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3814 struct skd_device *skdev = skd_host_data;
3815 unsigned long flags;
3816 int flush_enqueued = 0;
3819 spin_lock_irqsave(&skdev->lock, flags);
3820 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3821 skdev->name, __func__, __LINE__,
3822 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3823 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3824 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3827 skd_request_fn(skdev->queue);
3830 schedule_work(&skdev->completion_worker);
3831 else if (!flush_enqueued)
3832 skd_request_fn(skdev->queue);
3834 spin_unlock_irqrestore(&skdev->lock, flags);
3839 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3841 struct skd_device *skdev = skd_host_data;
3842 unsigned long flags;
3844 spin_lock_irqsave(&skdev->lock, flags);
3845 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3846 skdev->name, __func__, __LINE__,
3847 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3848 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3849 skd_isr_msg_from_dev(skdev);
3850 spin_unlock_irqrestore(&skdev->lock, flags);
3854 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3856 struct skd_device *skdev = skd_host_data;
3857 unsigned long flags;
3859 spin_lock_irqsave(&skdev->lock, flags);
3860 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3861 skdev->name, __func__, __LINE__,
3862 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3863 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3864 spin_unlock_irqrestore(&skdev->lock, flags);
3869 *****************************************************************************
3870 * PCIe MSI/MSI-X SETUP
3871 *****************************************************************************
3874 struct skd_msix_entry {
3878 struct skd_device *rsp;
3882 struct skd_init_msix_entry {
3884 irq_handler_t handler;
3887 #define SKD_MAX_MSIX_COUNT 13
3888 #define SKD_MIN_MSIX_COUNT 7
3889 #define SKD_BASE_MSIX_IRQ 4
3891 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3892 { "(DMA 0)", skd_reserved_isr },
3893 { "(DMA 1)", skd_reserved_isr },
3894 { "(DMA 2)", skd_reserved_isr },
3895 { "(DMA 3)", skd_reserved_isr },
3896 { "(State Change)", skd_statec_isr },
3897 { "(COMPL_Q)", skd_comp_q },
3898 { "(MSG)", skd_msg_isr },
3899 { "(Reserved)", skd_reserved_isr },
3900 { "(Reserved)", skd_reserved_isr },
3901 { "(Queue Full 0)", skd_qfull_isr },
3902 { "(Queue Full 1)", skd_qfull_isr },
3903 { "(Queue Full 2)", skd_qfull_isr },
3904 { "(Queue Full 3)", skd_qfull_isr },
3907 static void skd_release_msix(struct skd_device *skdev)
3909 struct skd_msix_entry *qentry;
3912 if (skdev->msix_entries) {
3913 for (i = 0; i < skdev->msix_count; i++) {
3914 qentry = &skdev->msix_entries[i];
3915 skdev = qentry->rsp;
3917 if (qentry->have_irq)
3918 devm_free_irq(&skdev->pdev->dev,
3919 qentry->vector, qentry->rsp);
3922 kfree(skdev->msix_entries);
3925 if (skdev->msix_count)
3926 pci_disable_msix(skdev->pdev);
3928 skdev->msix_count = 0;
3929 skdev->msix_entries = NULL;
3932 static int skd_acquire_msix(struct skd_device *skdev)
3935 struct pci_dev *pdev = skdev->pdev;
3936 struct msix_entry *entries;
3937 struct skd_msix_entry *qentry;
3939 entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
3944 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
3945 entries[i].entry = i;
3947 rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
3949 pr_err("(%s): failed to enable MSI-X %d\n",
3950 skd_name(skdev), rc);
3954 skdev->msix_count = SKD_MAX_MSIX_COUNT;
3955 skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
3956 skdev->msix_count, GFP_KERNEL);
3957 if (!skdev->msix_entries) {
3959 pr_err("(%s): msix table allocation error\n",
3964 for (i = 0; i < skdev->msix_count; i++) {
3965 qentry = &skdev->msix_entries[i];
3966 qentry->vector = entries[i].vector;
3967 qentry->entry = entries[i].entry;
3969 qentry->have_irq = 0;
3970 pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
3971 skdev->name, __func__, __LINE__,
3972 pci_name(pdev), skdev->name,
3973 i, qentry->vector, qentry->entry);
3976 /* Enable MSI-X vectors for the base queue */
3977 for (i = 0; i < skdev->msix_count; i++) {
3978 qentry = &skdev->msix_entries[i];
3979 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3980 "%s%d-msix %s", DRV_NAME, skdev->devno,
3981 msix_entries[i].name);
3982 rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
3983 msix_entries[i].handler, 0,
3984 qentry->isr_name, skdev);
3986 pr_err("(%s): Unable to register(%d) MSI-X "
3988 skd_name(skdev), rc, i, qentry->isr_name);
3991 qentry->have_irq = 1;
3992 qentry->rsp = skdev;
3995 pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3996 skdev->name, __func__, __LINE__,
3997 pci_name(pdev), skdev->name, skdev->msix_count);
4003 skd_release_msix(skdev);
4007 static int skd_acquire_irq(struct skd_device *skdev)
4010 struct pci_dev *pdev;
4013 skdev->msix_count = 0;
4016 switch (skdev->irq_type) {
4018 rc = skd_acquire_msix(skdev);
4020 pr_info("(%s): MSI-X %d irqs enabled\n",
4021 skd_name(skdev), skdev->msix_count);
4024 "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
4025 skd_name(skdev), rc);
4026 skdev->irq_type = SKD_IRQ_MSI;
4027 goto RETRY_IRQ_TYPE;
4031 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
4032 DRV_NAME, skdev->devno);
4033 rc = pci_enable_msi_range(pdev, 1, 1);
4035 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
4036 skdev->isr_name, skdev);
4038 pci_disable_msi(pdev);
4040 "(%s): failed to allocate the MSI interrupt %d\n",
4041 skd_name(skdev), rc);
4042 goto RETRY_IRQ_LEGACY;
4044 pr_info("(%s): MSI irq %d enabled\n",
4045 skd_name(skdev), pdev->irq);
4049 "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4050 skd_name(skdev), rc);
4051 skdev->irq_type = SKD_IRQ_LEGACY;
4052 goto RETRY_IRQ_TYPE;
4055 case SKD_IRQ_LEGACY:
4056 snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4057 "%s%d-legacy", DRV_NAME, skdev->devno);
4058 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4059 IRQF_SHARED, skdev->isr_name, skdev);
4061 pr_info("(%s): LEGACY irq %d enabled\n",
4062 skd_name(skdev), pdev->irq);
4064 pr_err("(%s): request LEGACY irq error %d\n",
4065 skd_name(skdev), rc);
4068 pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4069 skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4070 skdev->irq_type = SKD_IRQ_LEGACY;
4071 goto RETRY_IRQ_TYPE;
4076 static void skd_release_irq(struct skd_device *skdev)
4078 switch (skdev->irq_type) {
4080 skd_release_msix(skdev);
4083 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4084 pci_disable_msi(skdev->pdev);
4086 case SKD_IRQ_LEGACY:
4087 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4090 pr_err("(%s): wrong irq type %d!",
4091 skd_name(skdev), skdev->irq_type);
4097 *****************************************************************************
4099 *****************************************************************************
4102 static int skd_cons_skcomp(struct skd_device *skdev)
4105 struct fit_completion_entry_v1 *skcomp;
4108 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4109 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4111 pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
4112 skdev->name, __func__, __LINE__,
4113 nbytes, SKD_N_COMPLETION_ENTRY);
4115 skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
4116 &skdev->cq_dma_address);
4118 if (skcomp == NULL) {
4123 skdev->skcomp_table = skcomp;
4124 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4126 SKD_N_COMPLETION_ENTRY);
4132 static int skd_cons_skmsg(struct skd_device *skdev)
4137 pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4138 skdev->name, __func__, __LINE__,
4139 sizeof(struct skd_fitmsg_context),
4140 skdev->num_fitmsg_context,
4141 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4143 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4144 *skdev->num_fitmsg_context, GFP_KERNEL);
4145 if (skdev->skmsg_table == NULL) {
4150 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4151 struct skd_fitmsg_context *skmsg;
4153 skmsg = &skdev->skmsg_table[i];
4155 skmsg->id = i + SKD_ID_FIT_MSG;
4157 skmsg->state = SKD_MSG_STATE_IDLE;
4158 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4159 SKD_N_FITMSG_BYTES + 64,
4160 &skmsg->mb_dma_address);
4162 if (skmsg->msg_buf == NULL) {
4167 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4168 (~FIT_QCMD_BASE_ADDRESS_MASK));
4169 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4170 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4171 FIT_QCMD_BASE_ADDRESS_MASK);
4172 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4173 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4174 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4176 skmsg->next = &skmsg[1];
4179 /* Free list is in order starting with the 0th entry. */
4180 skdev->skmsg_table[i - 1].next = NULL;
4181 skdev->skmsg_free_list = skdev->skmsg_table;
4187 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4189 dma_addr_t *ret_dma_addr)
4191 struct fit_sg_descriptor *sg_list;
4194 nbytes = sizeof(*sg_list) * n_sg;
4196 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4198 if (sg_list != NULL) {
4199 uint64_t dma_address = *ret_dma_addr;
4202 memset(sg_list, 0, nbytes);
4204 for (i = 0; i < n_sg - 1; i++) {
4206 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4208 sg_list[i].next_desc_ptr = dma_address + ndp_off;
4210 sg_list[i].next_desc_ptr = 0LL;
4216 static int skd_cons_skreq(struct skd_device *skdev)
4221 pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4222 skdev->name, __func__, __LINE__,
4223 sizeof(struct skd_request_context),
4224 skdev->num_req_context,
4225 sizeof(struct skd_request_context) * skdev->num_req_context);
4227 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4228 * skdev->num_req_context, GFP_KERNEL);
4229 if (skdev->skreq_table == NULL) {
4234 pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4235 skdev->name, __func__, __LINE__,
4236 skdev->sgs_per_request, sizeof(struct scatterlist),
4237 skdev->sgs_per_request * sizeof(struct scatterlist));
4239 for (i = 0; i < skdev->num_req_context; i++) {
4240 struct skd_request_context *skreq;
4242 skreq = &skdev->skreq_table[i];
4244 skreq->id = i + SKD_ID_RW_REQUEST;
4245 skreq->state = SKD_REQ_STATE_IDLE;
4247 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4248 skdev->sgs_per_request, GFP_KERNEL);
4249 if (skreq->sg == NULL) {
4253 sg_init_table(skreq->sg, skdev->sgs_per_request);
4255 skreq->sksg_list = skd_cons_sg_list(skdev,
4256 skdev->sgs_per_request,
4257 &skreq->sksg_dma_address);
4259 if (skreq->sksg_list == NULL) {
4264 skreq->next = &skreq[1];
4267 /* Free list is in order starting with the 0th entry. */
4268 skdev->skreq_table[i - 1].next = NULL;
4269 skdev->skreq_free_list = skdev->skreq_table;
4275 static int skd_cons_skspcl(struct skd_device *skdev)
4280 pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4281 skdev->name, __func__, __LINE__,
4282 sizeof(struct skd_special_context),
4284 sizeof(struct skd_special_context) * skdev->n_special);
4286 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4287 * skdev->n_special, GFP_KERNEL);
4288 if (skdev->skspcl_table == NULL) {
4293 for (i = 0; i < skdev->n_special; i++) {
4294 struct skd_special_context *skspcl;
4296 skspcl = &skdev->skspcl_table[i];
4298 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4299 skspcl->req.state = SKD_REQ_STATE_IDLE;
4301 skspcl->req.next = &skspcl[1].req;
4303 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4306 pci_zalloc_consistent(skdev->pdev, nbytes,
4307 &skspcl->mb_dma_address);
4308 if (skspcl->msg_buf == NULL) {
4313 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4314 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4315 if (skspcl->req.sg == NULL) {
4320 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4321 SKD_N_SG_PER_SPECIAL,
4324 if (skspcl->req.sksg_list == NULL) {
4330 /* Free list is in order starting with the 0th entry. */
4331 skdev->skspcl_table[i - 1].req.next = NULL;
4332 skdev->skspcl_free_list = skdev->skspcl_table;
4340 static int skd_cons_sksb(struct skd_device *skdev)
4343 struct skd_special_context *skspcl;
4346 skspcl = &skdev->internal_skspcl;
4348 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4349 skspcl->req.state = SKD_REQ_STATE_IDLE;
4351 nbytes = SKD_N_INTERNAL_BYTES;
4353 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4354 &skspcl->db_dma_address);
4355 if (skspcl->data_buf == NULL) {
4360 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4361 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4362 &skspcl->mb_dma_address);
4363 if (skspcl->msg_buf == NULL) {
4368 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4369 &skspcl->req.sksg_dma_address);
4370 if (skspcl->req.sksg_list == NULL) {
4375 if (!skd_format_internal_skspcl(skdev)) {
4384 static int skd_cons_disk(struct skd_device *skdev)
4387 struct gendisk *disk;
4388 struct request_queue *q;
4389 unsigned long flags;
4391 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4398 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4400 disk->major = skdev->major;
4401 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4402 disk->fops = &skd_blockdev_ops;
4403 disk->private_data = skdev;
4405 q = blk_init_queue(skd_request_fn, &skdev->lock);
4413 q->queuedata = skdev;
4415 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
4416 blk_queue_max_segments(q, skdev->sgs_per_request);
4417 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4419 /* set sysfs ptimal_io_size to 8K */
4420 blk_queue_io_opt(q, 8192);
4422 /* DISCARD Flag initialization. */
4423 q->limits.discard_granularity = 8192;
4424 q->limits.discard_alignment = 0;
4425 q->limits.max_discard_sectors = UINT_MAX >> 9;
4426 q->limits.discard_zeroes_data = 1;
4427 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4428 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4429 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4431 spin_lock_irqsave(&skdev->lock, flags);
4432 pr_debug("%s:%s:%d stopping %s queue\n",
4433 skdev->name, __func__, __LINE__, skdev->name);
4434 blk_stop_queue(skdev->queue);
4435 spin_unlock_irqrestore(&skdev->lock, flags);
4441 #define SKD_N_DEV_TABLE 16u
4442 static u32 skd_next_devno;
4444 static struct skd_device *skd_construct(struct pci_dev *pdev)
4446 struct skd_device *skdev;
4447 int blk_major = skd_major;
4450 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4453 pr_err(PFX "(%s): memory alloc failure\n",
4458 skdev->state = SKD_DRVR_STATE_LOAD;
4460 skdev->devno = skd_next_devno++;
4461 skdev->major = blk_major;
4462 skdev->irq_type = skd_isr_type;
4463 sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4464 skdev->dev_max_queue_depth = 0;
4466 skdev->num_req_context = skd_max_queue_depth;
4467 skdev->num_fitmsg_context = skd_max_queue_depth;
4468 skdev->n_special = skd_max_pass_thru;
4469 skdev->cur_max_queue_depth = 1;
4470 skdev->queue_low_water_mark = 1;
4471 skdev->proto_ver = 99;
4472 skdev->sgs_per_request = skd_sgs_per_request;
4473 skdev->dbg_level = skd_dbg_level;
4475 atomic_set(&skdev->device_count, 0);
4477 spin_lock_init(&skdev->lock);
4479 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4481 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4482 rc = skd_cons_skcomp(skdev);
4486 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4487 rc = skd_cons_skmsg(skdev);
4491 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4492 rc = skd_cons_skreq(skdev);
4496 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4497 rc = skd_cons_skspcl(skdev);
4501 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4502 rc = skd_cons_sksb(skdev);
4506 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4507 rc = skd_cons_disk(skdev);
4511 pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4515 pr_debug("%s:%s:%d construct failed\n",
4516 skdev->name, __func__, __LINE__);
4517 skd_destruct(skdev);
4522 *****************************************************************************
4524 *****************************************************************************
4527 static void skd_free_skcomp(struct skd_device *skdev)
4529 if (skdev->skcomp_table != NULL) {
4532 nbytes = sizeof(skdev->skcomp_table[0]) *
4533 SKD_N_COMPLETION_ENTRY;
4534 pci_free_consistent(skdev->pdev, nbytes,
4535 skdev->skcomp_table, skdev->cq_dma_address);
4538 skdev->skcomp_table = NULL;
4539 skdev->cq_dma_address = 0;
4542 static void skd_free_skmsg(struct skd_device *skdev)
4546 if (skdev->skmsg_table == NULL)
4549 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4550 struct skd_fitmsg_context *skmsg;
4552 skmsg = &skdev->skmsg_table[i];
4554 if (skmsg->msg_buf != NULL) {
4555 skmsg->msg_buf += skmsg->offset;
4556 skmsg->mb_dma_address += skmsg->offset;
4557 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4559 skmsg->mb_dma_address);
4561 skmsg->msg_buf = NULL;
4562 skmsg->mb_dma_address = 0;
4565 kfree(skdev->skmsg_table);
4566 skdev->skmsg_table = NULL;
4569 static void skd_free_sg_list(struct skd_device *skdev,
4570 struct fit_sg_descriptor *sg_list,
4571 u32 n_sg, dma_addr_t dma_addr)
4573 if (sg_list != NULL) {
4576 nbytes = sizeof(*sg_list) * n_sg;
4578 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4582 static void skd_free_skreq(struct skd_device *skdev)
4586 if (skdev->skreq_table == NULL)
4589 for (i = 0; i < skdev->num_req_context; i++) {
4590 struct skd_request_context *skreq;
4592 skreq = &skdev->skreq_table[i];
4594 skd_free_sg_list(skdev, skreq->sksg_list,
4595 skdev->sgs_per_request,
4596 skreq->sksg_dma_address);
4598 skreq->sksg_list = NULL;
4599 skreq->sksg_dma_address = 0;
4604 kfree(skdev->skreq_table);
4605 skdev->skreq_table = NULL;
4608 static void skd_free_skspcl(struct skd_device *skdev)
4613 if (skdev->skspcl_table == NULL)
4616 for (i = 0; i < skdev->n_special; i++) {
4617 struct skd_special_context *skspcl;
4619 skspcl = &skdev->skspcl_table[i];
4621 if (skspcl->msg_buf != NULL) {
4622 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4623 pci_free_consistent(skdev->pdev, nbytes,
4625 skspcl->mb_dma_address);
4628 skspcl->msg_buf = NULL;
4629 skspcl->mb_dma_address = 0;
4631 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4632 SKD_N_SG_PER_SPECIAL,
4633 skspcl->req.sksg_dma_address);
4635 skspcl->req.sksg_list = NULL;
4636 skspcl->req.sksg_dma_address = 0;
4638 kfree(skspcl->req.sg);
4641 kfree(skdev->skspcl_table);
4642 skdev->skspcl_table = NULL;
4645 static void skd_free_sksb(struct skd_device *skdev)
4647 struct skd_special_context *skspcl;
4650 skspcl = &skdev->internal_skspcl;
4652 if (skspcl->data_buf != NULL) {
4653 nbytes = SKD_N_INTERNAL_BYTES;
4655 pci_free_consistent(skdev->pdev, nbytes,
4656 skspcl->data_buf, skspcl->db_dma_address);
4659 skspcl->data_buf = NULL;
4660 skspcl->db_dma_address = 0;
4662 if (skspcl->msg_buf != NULL) {
4663 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4664 pci_free_consistent(skdev->pdev, nbytes,
4665 skspcl->msg_buf, skspcl->mb_dma_address);
4668 skspcl->msg_buf = NULL;
4669 skspcl->mb_dma_address = 0;
4671 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4672 skspcl->req.sksg_dma_address);
4674 skspcl->req.sksg_list = NULL;
4675 skspcl->req.sksg_dma_address = 0;
4678 static void skd_free_disk(struct skd_device *skdev)
4680 struct gendisk *disk = skdev->disk;
4683 struct request_queue *q = disk->queue;
4685 if (disk->flags & GENHD_FL_UP)
4688 blk_cleanup_queue(q);
4694 static void skd_destruct(struct skd_device *skdev)
4700 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4701 skd_free_disk(skdev);
4703 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4704 skd_free_sksb(skdev);
4706 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4707 skd_free_skspcl(skdev);
4709 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4710 skd_free_skreq(skdev);
4712 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4713 skd_free_skmsg(skdev);
4715 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4716 skd_free_skcomp(skdev);
4718 pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4723 *****************************************************************************
4724 * BLOCK DEVICE (BDEV) GLUE
4725 *****************************************************************************
4728 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4730 struct skd_device *skdev;
4733 skdev = bdev->bd_disk->private_data;
4735 pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4736 skdev->name, __func__, __LINE__,
4737 bdev->bd_disk->disk_name, current->comm);
4739 if (skdev->read_cap_is_valid) {
4740 capacity = get_capacity(skdev->disk);
4743 geo->cylinders = (capacity) / (255 * 64);
4750 static int skd_bdev_attach(struct skd_device *skdev)
4752 pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4753 add_disk(skdev->disk);
4757 static const struct block_device_operations skd_blockdev_ops = {
4758 .owner = THIS_MODULE,
4759 .ioctl = skd_bdev_ioctl,
4760 .getgeo = skd_bdev_getgeo,
4765 *****************************************************************************
4767 *****************************************************************************
4770 static const struct pci_device_id skd_pci_tbl[] = {
4771 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4772 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4773 { 0 } /* terminate list */
4776 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4778 static char *skd_pci_info(struct skd_device *skdev, char *str)
4782 strcpy(str, "PCIe (");
4783 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4788 uint16_t pcie_lstat, lspeed, lwidth;
4791 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4792 lspeed = pcie_lstat & (0xF);
4793 lwidth = (pcie_lstat & 0x3F0) >> 4;
4796 strcat(str, "2.5GT/s ");
4797 else if (lspeed == 2)
4798 strcat(str, "5.0GT/s ");
4800 strcat(str, "<unknown> ");
4801 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4807 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4812 struct skd_device *skdev;
4814 pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4815 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4816 pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4817 pci_name(pdev), pdev->vendor, pdev->device);
4819 rc = pci_enable_device(pdev);
4822 rc = pci_request_regions(pdev, DRV_NAME);
4825 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4827 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4829 pr_err("(%s): consistent DMA mask error %d\n",
4830 pci_name(pdev), rc);
4833 (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)));
4836 pr_err("(%s): DMA mask error %d\n",
4837 pci_name(pdev), rc);
4838 goto err_out_regions;
4843 rc = register_blkdev(0, DRV_NAME);
4845 goto err_out_regions;
4850 skdev = skd_construct(pdev);
4851 if (skdev == NULL) {
4853 goto err_out_regions;
4856 skd_pci_info(skdev, pci_str);
4857 pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4859 pci_set_master(pdev);
4860 rc = pci_enable_pcie_error_reporting(pdev);
4863 "(%s): bad enable of PCIe error reporting rc=%d\n",
4864 skd_name(skdev), rc);
4865 skdev->pcie_error_reporting_is_enabled = 0;
4867 skdev->pcie_error_reporting_is_enabled = 1;
4870 pci_set_drvdata(pdev, skdev);
4872 skdev->disk->driverfs_dev = &pdev->dev;
4874 for (i = 0; i < SKD_MAX_BARS; i++) {
4875 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4876 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4877 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4878 skdev->mem_size[i]);
4879 if (!skdev->mem_map[i]) {
4880 pr_err("(%s): Unable to map adapter memory!\n",
4883 goto err_out_iounmap;
4885 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
4886 skdev->name, __func__, __LINE__,
4888 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4891 rc = skd_acquire_irq(skdev);
4893 pr_err("(%s): interrupt resource error %d\n",
4894 skd_name(skdev), rc);
4895 goto err_out_iounmap;
4898 rc = skd_start_timer(skdev);
4902 init_waitqueue_head(&skdev->waitq);
4904 skd_start_device(skdev);
4906 rc = wait_event_interruptible_timeout(skdev->waitq,
4907 (skdev->gendisk_on),
4908 (SKD_START_WAIT_SECONDS * HZ));
4909 if (skdev->gendisk_on > 0) {
4910 /* device came on-line after reset */
4911 skd_bdev_attach(skdev);
4914 /* we timed out, something is wrong with the device,
4915 don't add the disk structure */
4917 "(%s): error: waiting for s1120 timed out %d!\n",
4918 skd_name(skdev), rc);
4919 /* in case of no error; we timeout with ENXIO */
4926 #ifdef SKD_VMK_POLL_HANDLER
4927 if (skdev->irq_type == SKD_IRQ_MSIX) {
4928 /* MSIX completion handler is being used for coredump */
4929 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4930 skdev->msix_entries[5].vector,
4933 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4934 skdev->pdev->irq, skd_isr,
4937 #endif /* SKD_VMK_POLL_HANDLER */
4942 skd_stop_device(skdev);
4943 skd_release_irq(skdev);
4946 for (i = 0; i < SKD_MAX_BARS; i++)
4947 if (skdev->mem_map[i])
4948 iounmap(skdev->mem_map[i]);
4950 if (skdev->pcie_error_reporting_is_enabled)
4951 pci_disable_pcie_error_reporting(pdev);
4953 skd_destruct(skdev);
4956 pci_release_regions(pdev);
4959 pci_disable_device(pdev);
4960 pci_set_drvdata(pdev, NULL);
4964 static void skd_pci_remove(struct pci_dev *pdev)
4967 struct skd_device *skdev;
4969 skdev = pci_get_drvdata(pdev);
4971 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4974 skd_stop_device(skdev);
4975 skd_release_irq(skdev);
4977 for (i = 0; i < SKD_MAX_BARS; i++)
4978 if (skdev->mem_map[i])
4979 iounmap((u32 *)skdev->mem_map[i]);
4981 if (skdev->pcie_error_reporting_is_enabled)
4982 pci_disable_pcie_error_reporting(pdev);
4984 skd_destruct(skdev);
4986 pci_release_regions(pdev);
4987 pci_disable_device(pdev);
4988 pci_set_drvdata(pdev, NULL);
4993 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4996 struct skd_device *skdev;
4998 skdev = pci_get_drvdata(pdev);
5000 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5004 skd_stop_device(skdev);
5006 skd_release_irq(skdev);
5008 for (i = 0; i < SKD_MAX_BARS; i++)
5009 if (skdev->mem_map[i])
5010 iounmap((u32 *)skdev->mem_map[i]);
5012 if (skdev->pcie_error_reporting_is_enabled)
5013 pci_disable_pcie_error_reporting(pdev);
5015 pci_release_regions(pdev);
5016 pci_save_state(pdev);
5017 pci_disable_device(pdev);
5018 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5022 static int skd_pci_resume(struct pci_dev *pdev)
5026 struct skd_device *skdev;
5028 skdev = pci_get_drvdata(pdev);
5030 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5034 pci_set_power_state(pdev, PCI_D0);
5035 pci_enable_wake(pdev, PCI_D0, 0);
5036 pci_restore_state(pdev);
5038 rc = pci_enable_device(pdev);
5041 rc = pci_request_regions(pdev, DRV_NAME);
5044 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5046 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5048 pr_err("(%s): consistent DMA mask error %d\n",
5049 pci_name(pdev), rc);
5052 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5055 pr_err("(%s): DMA mask error %d\n",
5056 pci_name(pdev), rc);
5057 goto err_out_regions;
5061 pci_set_master(pdev);
5062 rc = pci_enable_pcie_error_reporting(pdev);
5064 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
5066 skdev->pcie_error_reporting_is_enabled = 0;
5068 skdev->pcie_error_reporting_is_enabled = 1;
5070 for (i = 0; i < SKD_MAX_BARS; i++) {
5072 skdev->mem_phys[i] = pci_resource_start(pdev, i);
5073 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5074 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5075 skdev->mem_size[i]);
5076 if (!skdev->mem_map[i]) {
5077 pr_err("(%s): Unable to map adapter memory!\n",
5080 goto err_out_iounmap;
5082 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
5083 skdev->name, __func__, __LINE__,
5085 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
5087 rc = skd_acquire_irq(skdev);
5090 pr_err("(%s): interrupt resource error %d\n",
5091 pci_name(pdev), rc);
5092 goto err_out_iounmap;
5095 rc = skd_start_timer(skdev);
5099 init_waitqueue_head(&skdev->waitq);
5101 skd_start_device(skdev);
5106 skd_stop_device(skdev);
5107 skd_release_irq(skdev);
5110 for (i = 0; i < SKD_MAX_BARS; i++)
5111 if (skdev->mem_map[i])
5112 iounmap(skdev->mem_map[i]);
5114 if (skdev->pcie_error_reporting_is_enabled)
5115 pci_disable_pcie_error_reporting(pdev);
5118 pci_release_regions(pdev);
5121 pci_disable_device(pdev);
5125 static void skd_pci_shutdown(struct pci_dev *pdev)
5127 struct skd_device *skdev;
5129 pr_err("skd_pci_shutdown called\n");
5131 skdev = pci_get_drvdata(pdev);
5133 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5137 pr_err("%s: calling stop\n", skd_name(skdev));
5138 skd_stop_device(skdev);
5141 static struct pci_driver skd_driver = {
5143 .id_table = skd_pci_tbl,
5144 .probe = skd_pci_probe,
5145 .remove = skd_pci_remove,
5146 .suspend = skd_pci_suspend,
5147 .resume = skd_pci_resume,
5148 .shutdown = skd_pci_shutdown,
5152 *****************************************************************************
5154 *****************************************************************************
5157 static const char *skd_name(struct skd_device *skdev)
5159 memset(skdev->id_str, 0, sizeof(skdev->id_str));
5161 if (skdev->inquiry_is_valid)
5162 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5163 skdev->name, skdev->inq_serial_num,
5164 pci_name(skdev->pdev));
5166 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5167 skdev->name, pci_name(skdev->pdev));
5169 return skdev->id_str;
5172 const char *skd_drive_state_to_str(int state)
5175 case FIT_SR_DRIVE_OFFLINE:
5177 case FIT_SR_DRIVE_INIT:
5179 case FIT_SR_DRIVE_ONLINE:
5181 case FIT_SR_DRIVE_BUSY:
5183 case FIT_SR_DRIVE_FAULT:
5185 case FIT_SR_DRIVE_DEGRADED:
5187 case FIT_SR_PCIE_LINK_DOWN:
5189 case FIT_SR_DRIVE_SOFT_RESET:
5190 return "SOFT_RESET";
5191 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5193 case FIT_SR_DRIVE_INIT_FAULT:
5194 return "INIT_FAULT";
5195 case FIT_SR_DRIVE_BUSY_SANITIZE:
5196 return "BUSY_SANITIZE";
5197 case FIT_SR_DRIVE_BUSY_ERASE:
5198 return "BUSY_ERASE";
5199 case FIT_SR_DRIVE_FW_BOOTING:
5200 return "FW_BOOTING";
5206 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5209 case SKD_DRVR_STATE_LOAD:
5211 case SKD_DRVR_STATE_IDLE:
5213 case SKD_DRVR_STATE_BUSY:
5215 case SKD_DRVR_STATE_STARTING:
5217 case SKD_DRVR_STATE_ONLINE:
5219 case SKD_DRVR_STATE_PAUSING:
5221 case SKD_DRVR_STATE_PAUSED:
5223 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5224 return "DRAINING_TIMEOUT";
5225 case SKD_DRVR_STATE_RESTARTING:
5226 return "RESTARTING";
5227 case SKD_DRVR_STATE_RESUMING:
5229 case SKD_DRVR_STATE_STOPPING:
5231 case SKD_DRVR_STATE_SYNCING:
5233 case SKD_DRVR_STATE_FAULT:
5235 case SKD_DRVR_STATE_DISAPPEARED:
5236 return "DISAPPEARED";
5237 case SKD_DRVR_STATE_BUSY_ERASE:
5238 return "BUSY_ERASE";
5239 case SKD_DRVR_STATE_BUSY_SANITIZE:
5240 return "BUSY_SANITIZE";
5241 case SKD_DRVR_STATE_BUSY_IMMINENT:
5242 return "BUSY_IMMINENT";
5243 case SKD_DRVR_STATE_WAIT_BOOT:
5251 static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5254 case SKD_MSG_STATE_IDLE:
5256 case SKD_MSG_STATE_BUSY:
5263 static const char *skd_skreq_state_to_str(enum skd_req_state state)
5266 case SKD_REQ_STATE_IDLE:
5268 case SKD_REQ_STATE_SETUP:
5270 case SKD_REQ_STATE_BUSY:
5272 case SKD_REQ_STATE_COMPLETED:
5274 case SKD_REQ_STATE_TIMEOUT:
5276 case SKD_REQ_STATE_ABORTED:
5283 static void skd_log_skdev(struct skd_device *skdev, const char *event)
5285 pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5286 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5287 pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
5288 skdev->name, __func__, __LINE__,
5289 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5290 skd_skdev_state_to_str(skdev->state), skdev->state);
5291 pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
5292 skdev->name, __func__, __LINE__,
5293 skdev->in_flight, skdev->cur_max_queue_depth,
5294 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5295 pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
5296 skdev->name, __func__, __LINE__,
5297 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5300 static void skd_log_skmsg(struct skd_device *skdev,
5301 struct skd_fitmsg_context *skmsg, const char *event)
5303 pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5304 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5305 pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
5306 skdev->name, __func__, __LINE__,
5307 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5308 skmsg->id, skmsg->length);
5311 static void skd_log_skreq(struct skd_device *skdev,
5312 struct skd_request_context *skreq, const char *event)
5314 pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5315 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5316 pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5317 skdev->name, __func__, __LINE__,
5318 skd_skreq_state_to_str(skreq->state), skreq->state,
5319 skreq->id, skreq->fitmsg_id);
5320 pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
5321 skdev->name, __func__, __LINE__,
5322 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5324 if (skreq->req != NULL) {
5325 struct request *req = skreq->req;
5326 u32 lba = (u32)blk_rq_pos(req);
5327 u32 count = blk_rq_sectors(req);
5329 pr_debug("%s:%s:%d "
5330 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5331 skdev->name, __func__, __LINE__,
5332 req, lba, lba, count, count,
5333 (int)rq_data_dir(req));
5335 pr_debug("%s:%s:%d req=NULL\n",
5336 skdev->name, __func__, __LINE__);
5340 *****************************************************************************
5342 *****************************************************************************
5345 static int __init skd_init(void)
5347 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5349 switch (skd_isr_type) {
5350 case SKD_IRQ_LEGACY:
5355 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5356 skd_isr_type, SKD_IRQ_DEFAULT);
5357 skd_isr_type = SKD_IRQ_DEFAULT;
5360 if (skd_max_queue_depth < 1 ||
5361 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5362 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5363 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5364 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5367 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5368 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5369 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5370 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5373 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5374 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
5375 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5376 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5379 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5380 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5385 if (skd_isr_comp_limit < 0) {
5386 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5387 skd_isr_comp_limit, 0);
5388 skd_isr_comp_limit = 0;
5391 if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5392 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5393 skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5394 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5397 return pci_register_driver(&skd_driver);
5400 static void __exit skd_exit(void)
5402 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5404 pci_unregister_driver(&skd_driver);
5407 unregister_blkdev(skd_major, DRV_NAME);
5410 module_init(skd_init);
5411 module_exit(skd_exit);