/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array", &SA5_access},
	{0x1921103C, "Smart Array", &SA5_access},
	{0x1922103C, "Smart Array", &SA5_access},
	{0x1923103C, "Smart Array", &SA5_access},
	{0x1924103C, "Smart Array", &SA5_access},
	{0x1925103C, "Smart Array", &SA5_access},
	{0x1926103C, "Smart Array", &SA5_access},
	{0x1928103C, "Smart Array", &SA5_access},
	{0x334d103C, "Smart Array P822se", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
};

/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}

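/*
 * A worked example of the parity scheme above: completed tags are
 * written into the reply ring with bit 0 equal to the current pass's
 * parity, and reply_pool_wraparound (initialized to 1 elsewhere in
 * this driver) tracks which parity is fresh.  When reply_pool_head
 * walks off the end of the ring it wraps to the start and the expected
 * parity flips, so a stale entry left over from the previous pass
 * fails the bit-0 test and the ring reads as empty until the
 * controller overwrites it.
 */
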
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

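/*
 * Example of the resulting tag: with blockFetchTable[SGList] == 3,
 * busaddr is ORed with (1 | (3 << 1)) == 0x7; bit 0 selects the
 * performant ("pull") model and bits 3-1 tell the controller how many
 * command table words to fetch for a command with that many SG
 * entries.
 */
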
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

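/*
 * Note: the low 3 bits of standard inquiry byte 2 are the ANSI version
 * field; a value of 5 (SPC-3) is how this driver tells current
 * generation controller firmware from older boards.
 */
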
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_DEVICES; i++) {
		if (!test_bit(i, lun_taken)) {
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

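/*
 * Example of the byte-4 convention used above: a multi-lun device
 * reports 8-byte addresses that are identical except for scsi3addr[4],
 * which holds the logical unit number.  A new address with
 * scsi3addr[4] == 2 is matched to its lun-0 sibling by zeroing byte 4
 * and comparing; it then inherits that sibling's bus/target and takes
 * lun 2.
 */
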
/* Replace an entry in h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

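/*
 * These three return values drive adjust_hpsa_scsi_table() below:
 * DEVICE_SAME leaves the entry alone, DEVICE_CHANGED swaps in the new
 * inquiry data via hpsa_scsi_replace_entry(), and DEVICE_NOT_FOUND
 * removes the stale entry from h->dev[].
 */
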
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

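/*
 * Layout note for the chaining scheme above: the first
 * max_cmd_sg_entries - 1 descriptors live in the command itself; the
 * last in-command slot is turned into a chain pointer (Ext ==
 * HPSA_SG_CHAIN) whose Addr/Len describe a separately mapped block
 * holding the remaining SGTotal - max_cmd_sg_entries descriptors.
 */
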
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
				cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

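/*
 * Note on the host bytes chosen above: DID_SOFT_ERROR asks the mid
 * layer to retry the command (unit attention, unsolicited abort),
 * DID_NO_CONNECT makes the device look absent, and DID_ERROR is used
 * for conditions we cannot safely retry.
 */
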
static int hpsa_scsi_detect(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

 fail_host_put:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
		" failed for controller %d\n", h->ctlr);
	scsi_host_put(sh);
	return error;
 fail:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
		" failed for controller %d\n", h->ctlr);
	return -ENOMEM;
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	} else {
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
	}
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			unsigned char page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

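/*
 * Both wrappers issue the same CISS report-LUNs command addressed to
 * the controller itself (all-zero scsi3addr); only the opcode differs,
 * so a rescan makes one physical and one logical pass over the same
 * size buffer, as done in hpsa_gather_lun_info() below.
 */
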
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *msa2xxx_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; msa2xxx_model[i]; i++)
		if (strncmp(device->model, msa2xxx_model[i],
			strlen(msa2xxx_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, int *bus, int *target, int *lun,
	struct hpsa_scsi_dev_t *device)
{
	u32 lunid;

	if (is_logical_dev_addr_mode(lunaddrbytes)) {
		/* logical device */
		lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
		if (is_msa2xxx(h, device)) {
			/* msa2xxx way, put logicals on bus 1
			 * and match target/lun numbers box
			 * reports.
			 */
			*bus = 1;
			*target = (lunid >> 16) & 0x3fff;
			*lun = lunid & 0x00ff;
		} else {
			if (likely(is_scsi_rev_5(h))) {
				/* All current smart arrays (circa 2011) */
				*bus = 0;
				*target = 0;
				*lun = (lunid & 0x3fff) + 1;
			} else {
				/* Traditional old smart array way. */
				*bus = 0;
				*target = lunid & 0x3fff;
				*lun = (lunid >> 16) & 0xff;
			}
		}
	} else {
		/* physical device */
		if (is_hba_lunid(lunaddrbytes))
			if (unlikely(is_scsi_rev_5(h))) {
				*bus = 0; /* put p1210m ctlr at 0,0,0 */
				*target = 0;
				*lun = 0;
				return;
			} else
				*bus = 3; /* traditional smartarray */
		else
			*bus = 2; /* physical disk */
		*target = -1;
		*lun = -1; /* we will fill these in later. */
	}
}

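/*
 * Worked example of the logical-device decoding above: the first
 * volume on a current (SCSI rev 5) controller reports the 0x40
 * address-mode bit in byte 3 and zeros elsewhere, so lunid ==
 * 0x40000000 and it lands at bus 0, target 0, lun 1.  An MSA2xxx
 * volume with lunid 0x40020001 instead keeps the box's own numbering:
 * bus 1, target (0x4002 & 0x3fff) == 2, lun 1.
 */
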
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *tmpdevice,
		struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
		int bus, int target, int lun, unsigned long lunzerobits[],
		int *nmsa2xxx_enclosures)
{
	unsigned char scsi3addr[8];

	if (test_bit(target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_msa2xxx(h, tmpdevice))
		return 0; /* It's only the MSA2xxx that have this problem. */

	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*nmsa2xxx_enclosures)++;
	hpsa_set_bus_target_lun(this_device, bus, target, 0);
	set_bit(target, lunzerobits);
	return 1;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

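/*
 * LUNListLength in the report-LUNs payload is a big-endian byte count,
 * so be32_to_cpu() of it divided by the 8-byte LUN address size gives
 * the LUN counts computed above.
 */
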
u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

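/*
 * Index arithmetic example: with 2 physicals, 3 logicals, and the RAID
 * controller reported last (raid_ctlr_position == 5), indices 0-1 map
 * into physdev_list, 2-4 into logdev_list, and 5 returns
 * RAID_CTLR_LUNID.  On a rev-5 board the controller comes first and
 * everything shifts up by one.
 */
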
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
	int bus, target, lun;
	int raid_ctlr_position;
	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
	 * but each of them 4 times through different paths.  The plus 1
	 * is for the RAID controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (unlikely(is_scsi_rev_5(h)))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	nmsa2xxx_enclosures = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
			tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For the msa2xxx boxes, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
				lunaddrbytes, bus, target, lun, lunzerobits,
				&nmsa2xxx_enclosures)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;
		hpsa_set_bus_target_lun(this_device, bus, target, lun);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i < nphysicals)
				break;
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
}

2043 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
2044 * dma mapping and fills in the scatter gather entries of the
2047 static int hpsa_scatter_gather(struct ctlr_info *h,
2048 struct CommandList *cp,
2049 struct scsi_cmnd *cmd)
2052 struct scatterlist *sg;
2054 int use_sg, i, sg_index, chained;
2055 struct SGDescriptor *curr_sg;
2057 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
2059 use_sg = scsi_dma_map(cmd);
2060 if (use_sg < 0)
2061 return use_sg;
2063 if (!use_sg)
2064 goto sglist_finished;
2065 curr_sg = cp->SG;
2066 chained = 0;
2067 sg_index = 0;
2069 scsi_for_each_sg(cmd, sg, use_sg, i) {
2070 if (i == h->max_cmd_sg_entries - 1 &&
2071 use_sg > h->max_cmd_sg_entries) {
2072 chained = 1;
2073 curr_sg = h->cmd_sg_list[cp->cmdindex];
2074 sg_index = 0;
2075 }
2076 addr64 = (u64) sg_dma_address(sg);
2077 len = sg_dma_len(sg);
2078 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2079 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2080 curr_sg->Len = len;
2081 curr_sg->Ext = 0; /* we are not chaining */
2082 curr_sg++;
2083 }
2085 if (use_sg + chained > h->maxSG)
2086 h->maxSG = use_sg + chained;
2088 if (chained) {
2089 cp->Header.SGList = h->max_cmd_sg_entries;
2090 cp->Header.SGTotal = (u16) (use_sg + 1);
2091 hpsa_map_sg_chain_block(h, cp);
2092 return 0;
2093 }
2095 sglist_finished:
2097 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
2098 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
2099 return 0;
2100 }
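/* Editor's sketch (not part of the original driver): how one SG
 * descriptor is filled. The 64-bit DMA address must be split into two
 * 32-bit halves because the controller stores SG addresses as
 * lower/upper pairs. Assumes the SGDescriptor layout used above; the
 * helper name is hypothetical.
 */
static inline void sketch_fill_sg_descriptor(struct SGDescriptor *d,
	u64 addr64, u32 len)
{
	d->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
	d->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
	d->Len = len;
	d->Ext = 0; /* nonzero only for a chain-pointer entry */
}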
2103 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2104 void (*done)(struct scsi_cmnd *))
2106 struct ctlr_info *h;
2107 struct hpsa_scsi_dev_t *dev;
2108 unsigned char scsi3addr[8];
2109 struct CommandList *c;
2110 unsigned long flags;
2112 /* Get the ptr to our adapter structure out of cmd->host. */
2113 h = sdev_to_hba(cmd->device);
2114 dev = cmd->device->hostdata;
2115 if (!dev) {
2116 cmd->result = DID_NO_CONNECT << 16;
2117 done(cmd);
2118 return 0;
2119 }
2120 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2122 spin_lock_irqsave(&h->lock, flags);
2123 if (unlikely(h->lockup_detected)) {
2124 spin_unlock_irqrestore(&h->lock, flags);
2125 cmd->result = DID_ERROR << 16;
2126 done(cmd);
2127 return 0;
2128 }
2129 /* Need a lock as this is being allocated from the pool */
2130 c = cmd_alloc(h);
2131 spin_unlock_irqrestore(&h->lock, flags);
2132 if (c == NULL) { /* trouble... */
2133 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2134 return SCSI_MLQUEUE_HOST_BUSY;
2137 /* Fill in the command list header */
2139 cmd->scsi_done = done; /* save this for use by completion code */
2141 /* save c in case we have to abort it */
2142 cmd->host_scribble = (unsigned char *) c;
2144 c->cmd_type = CMD_SCSI;
2146 c->Header.ReplyQueue = 0; /* unused in simple mode */
2147 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2148 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2149 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
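/* Editor's sketch (not part of the original driver): the "direct
 * lookup" tag encoding used above. The command-pool index is packed
 * into the upper bits of the tag and a flag bit marks it as an index,
 * so the completion path can go straight to h->cmd_pool + index
 * instead of searching the completion queue. Helper names are
 * hypothetical; the DIRECT_LOOKUP_* constants come from the driver's
 * headers.
 */
static inline u32 sketch_encode_tag(u32 cmdindex)
{
	return (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
}
static inline u32 sketch_decode_tag(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT; /* recover the pool index */
}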
2151 /* Fill in the request block... */
2153 c->Request.Timeout = 0;
2154 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2155 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2156 c->Request.CDBLen = cmd->cmd_len;
2157 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2158 c->Request.Type.Type = TYPE_CMD;
2159 c->Request.Type.Attribute = ATTR_SIMPLE;
2160 switch (cmd->sc_data_direction) {
2161 case DMA_TO_DEVICE:
2162 c->Request.Type.Direction = XFER_WRITE;
2163 break;
2164 case DMA_FROM_DEVICE:
2165 c->Request.Type.Direction = XFER_READ;
2166 break;
2167 case DMA_NONE:
2168 c->Request.Type.Direction = XFER_NONE;
2169 break;
2170 case DMA_BIDIRECTIONAL:
2171 /* This can happen if a buggy application does a scsi passthru
2172 * and sets both inlen and outlen to non-zero. ( see
2173 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2174 */
2176 c->Request.Type.Direction = XFER_RSVD;
2177 /* This is technically wrong, and hpsa controllers should
2178 * reject it with CMD_INVALID, which is the most correct
2179 * response, but non-fibre backends appear to let it
2180 * slide by, and give the same results as if this field
2181 * were set correctly. Either way is acceptable for
2182 * our purposes here.
2183 */
2185 break;
2187 default:
2188 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2189 cmd->sc_data_direction);
2190 BUG();
2191 break;
2192 }
2194 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2195 cmd_free(h, c);
2196 return SCSI_MLQUEUE_HOST_BUSY;
2197 }
2198 enqueue_cmd_and_start_io(h, c);
2199 /* the cmd'll come back via intr handler in complete_scsi_command() */
2200 return 0;
2201 }
2203 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2205 static void hpsa_scan_start(struct Scsi_Host *sh)
2207 struct ctlr_info *h = shost_to_hba(sh);
2208 unsigned long flags;
2210 /* wait until any scan already in progress is finished. */
2211 while (1) {
2212 spin_lock_irqsave(&h->scan_lock, flags);
2213 if (h->scan_finished)
2214 break;
2215 spin_unlock_irqrestore(&h->scan_lock, flags);
2216 wait_event(h->scan_wait_queue, h->scan_finished);
2217 /* Note: We don't need to worry about a race between this
2218 * thread and driver unload because the midlayer will
2219 * have incremented the reference count, so unload won't
2220 * happen if we're in here.
2221 */
2222 }
2223 h->scan_finished = 0; /* mark scan as in progress */
2224 spin_unlock_irqrestore(&h->scan_lock, flags);
2226 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2228 spin_lock_irqsave(&h->scan_lock, flags);
2229 h->scan_finished = 1; /* mark scan as finished. */
2230 wake_up_all(&h->scan_wait_queue);
2231 spin_unlock_irqrestore(&h->scan_lock, flags);
2234 static int hpsa_scan_finished(struct Scsi_Host *sh,
2235 unsigned long elapsed_time)
2237 struct ctlr_info *h = shost_to_hba(sh);
2238 unsigned long flags;
2239 int finished;
2241 spin_lock_irqsave(&h->scan_lock, flags);
2242 finished = h->scan_finished;
2243 spin_unlock_irqrestore(&h->scan_lock, flags);
2244 return finished;
2245 }
2247 static int hpsa_change_queue_depth(struct scsi_device *sdev,
2248 int qdepth, int reason)
2250 struct ctlr_info *h = sdev_to_hba(sdev);
2252 if (reason != SCSI_QDEPTH_DEFAULT)
2253 return -ENOTSUPP;
2255 if (qdepth < 1)
2256 qdepth = 1;
2257 else
2258 if (qdepth > h->nr_cmds)
2259 qdepth = h->nr_cmds;
2260 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2261 return sdev->queue_depth;
2264 static void hpsa_unregister_scsi(struct ctlr_info *h)
2266 /* we are being forcibly unloaded, and may not refuse. */
2267 scsi_remove_host(h->scsi_host);
2268 scsi_host_put(h->scsi_host);
2269 h->scsi_host = NULL;
2272 static int hpsa_register_scsi(struct ctlr_info *h)
2276 rc = hpsa_scsi_detect(h);
2277 if (rc != 0)
2278 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2279 " hpsa_scsi_detect(), rc is %d\n", rc);
2280 return rc;
2281 }
2283 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2284 unsigned char lunaddr[])
2288 int waittime = 1; /* seconds */
2289 struct CommandList *c;
2291 c = cmd_special_alloc(h);
2292 if (!c) {
2293 dev_warn(&h->pdev->dev, "out of memory in "
2294 "wait_for_device_to_become_ready.\n");
2295 return IO_ERROR;
2296 }
2298 /* Send test unit ready until device ready, or give up. */
2299 while (count < HPSA_TUR_RETRY_LIMIT) {
2301 /* Wait for a bit. do this first, because if we send
2302 * the TUR right away, the reset will just abort it.
2303 */
2304 msleep(1000 * waittime);
2305 count++;
2307 /* Increase wait time with each try, up to a point. */
2308 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2309 waittime = waittime * 2;
2311 /* Send the Test Unit Ready */
2312 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2313 hpsa_scsi_do_simple_cmd_core(h, c);
2314 /* no unmap needed here because no data xfer. */
2316 if (c->err_info->CommandStatus == CMD_SUCCESS)
2317 break;
2319 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2320 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2321 (c->err_info->SenseInfo[2] == NO_SENSE ||
2322 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2323 break;
2325 dev_warn(&h->pdev->dev, "waiting %d secs "
2326 "for device to become ready.\n", waittime);
2327 rc = 1; /* device not ready. */
2328 }
2330 if (rc)
2331 dev_warn(&h->pdev->dev, "giving up on device.\n");
2332 else
2333 dev_warn(&h->pdev->dev, "device is ready.\n");
2335 cmd_special_free(h, c);
2336 return rc;
2337 }
2339 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2340 * complaining. Doing a host- or bus-reset can't do anything good here.
2342 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2345 struct ctlr_info *h;
2346 struct hpsa_scsi_dev_t *dev;
2348 /* find the controller to which the command to be aborted was sent */
2349 h = sdev_to_hba(scsicmd->device);
2350 if (h == NULL) /* paranoia */
2351 return FAILED;
2352 dev = scsicmd->device->hostdata;
2353 if (!dev) {
2354 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2355 "device lookup failed.\n");
2356 return FAILED;
2357 }
2358 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2359 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2360 /* send a reset to the SCSI LUN which the command was sent to */
2361 rc = hpsa_send_reset(h, dev->scsi3addr);
2362 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2363 return SUCCESS;
2365 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2366 return FAILED;
2367 }
2370 * For operations that cannot sleep, a command block is allocated at init,
2371 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2372 * which ones are free or in use. Lock must be held when calling this.
2373 * cmd_free() is the complement.
2375 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2377 struct CommandList *c;
2379 union u64bit temp64;
2380 dma_addr_t cmd_dma_handle, err_dma_handle;
2382 do {
2383 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2384 if (i == h->nr_cmds)
2385 return NULL;
2386 } while (test_and_set_bit
2387 (i & (BITS_PER_LONG - 1),
2388 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
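/* (Editor's note: find_first_zero_bit() locates a candidate free slot
 * and test_and_set_bit() claims it atomically; if another path claimed
 * it first, test_and_set_bit() returns nonzero and the do/while loop
 * simply searches again.) */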
2389 c = h->cmd_pool + i;
2390 memset(c, 0, sizeof(*c));
2391 cmd_dma_handle = h->cmd_pool_dhandle
2392 + i * sizeof(*c);
2393 c->err_info = h->errinfo_pool + i;
2394 memset(c->err_info, 0, sizeof(*c->err_info));
2395 err_dma_handle = h->errinfo_pool_dhandle
2396 + i * sizeof(*c->err_info);
2401 INIT_LIST_HEAD(&c->list);
2402 c->busaddr = (u32) cmd_dma_handle;
2403 temp64.val = (u64) err_dma_handle;
2404 c->ErrDesc.Addr.lower = temp64.val32.lower;
2405 c->ErrDesc.Addr.upper = temp64.val32.upper;
2406 c->ErrDesc.Len = sizeof(*c->err_info);
2408 c->h = h;
2409 return c;
2410 }
2412 /* For operations that can wait for kmalloc to possibly sleep,
2413 * this routine can be called. Lock need not be held to call
2414 * cmd_special_alloc. cmd_special_free() is the complement.
2416 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2418 struct CommandList *c;
2419 union u64bit temp64;
2420 dma_addr_t cmd_dma_handle, err_dma_handle;
2422 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2423 if (c == NULL)
2424 return NULL;
2425 memset(c, 0, sizeof(*c));
2429 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2432 if (c->err_info == NULL) {
2433 pci_free_consistent(h->pdev,
2434 sizeof(*c), c, cmd_dma_handle);
2435 return NULL;
2436 }
2437 memset(c->err_info, 0, sizeof(*c->err_info));
2439 INIT_LIST_HEAD(&c->list);
2440 c->busaddr = (u32) cmd_dma_handle;
2441 temp64.val = (u64) err_dma_handle;
2442 c->ErrDesc.Addr.lower = temp64.val32.lower;
2443 c->ErrDesc.Addr.upper = temp64.val32.upper;
2444 c->ErrDesc.Len = sizeof(*c->err_info);
2446 c->h = h;
2447 return c;
2448 }
2450 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2454 i = c - h->cmd_pool;
2455 clear_bit(i & (BITS_PER_LONG - 1),
2456 h->cmd_pool_bits + (i / BITS_PER_LONG));
2460 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2462 union u64bit temp64;
2464 temp64.val32.lower = c->ErrDesc.Addr.lower;
2465 temp64.val32.upper = c->ErrDesc.Addr.upper;
2466 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2467 c->err_info, (dma_addr_t) temp64.val);
2468 pci_free_consistent(h->pdev, sizeof(*c),
2469 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2472 #ifdef CONFIG_COMPAT
2474 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2476 IOCTL32_Command_struct __user *arg32 =
2477 (IOCTL32_Command_struct __user *) arg;
2478 IOCTL_Command_struct arg64;
2479 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2483 memset(&arg64, 0, sizeof(arg64));
2485 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2486 sizeof(arg64.LUN_info));
2487 err |= copy_from_user(&arg64.Request, &arg32->Request,
2488 sizeof(arg64.Request));
2489 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2490 sizeof(arg64.error_info));
2491 err |= get_user(arg64.buf_size, &arg32->buf_size);
2492 err |= get_user(cp, &arg32->buf);
2493 arg64.buf = compat_ptr(cp);
2494 err |= copy_to_user(p, &arg64, sizeof(arg64));
2495 if (err)
2496 return -EFAULT;
2499 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2500 if (err)
2501 return err;
2502 err |= copy_in_user(&arg32->error_info, &p->error_info,
2503 sizeof(arg32->error_info));
2504 if (err)
2505 return -EFAULT;
2506 return err;
2507 }
2509 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2512 BIG_IOCTL32_Command_struct __user *arg32 =
2513 (BIG_IOCTL32_Command_struct __user *) arg;
2514 BIG_IOCTL_Command_struct arg64;
2515 BIG_IOCTL_Command_struct __user *p =
2516 compat_alloc_user_space(sizeof(arg64));
2520 memset(&arg64, 0, sizeof(arg64));
2522 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2523 sizeof(arg64.LUN_info));
2524 err |= copy_from_user(&arg64.Request, &arg32->Request,
2525 sizeof(arg64.Request));
2526 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2527 sizeof(arg64.error_info));
2528 err |= get_user(arg64.buf_size, &arg32->buf_size);
2529 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2530 err |= get_user(cp, &arg32->buf);
2531 arg64.buf = compat_ptr(cp);
2532 err |= copy_to_user(p, &arg64, sizeof(arg64));
2533 if (err)
2534 return -EFAULT;
2537 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2538 if (err)
2539 return err;
2540 err |= copy_in_user(&arg32->error_info, &p->error_info,
2541 sizeof(arg32->error_info));
2542 if (err)
2543 return -EFAULT;
2544 return err;
2545 }
2547 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2549 switch (cmd) {
2550 case CCISS_GETPCIINFO:
2551 case CCISS_GETINTINFO:
2552 case CCISS_SETINTINFO:
2553 case CCISS_GETNODENAME:
2554 case CCISS_SETNODENAME:
2555 case CCISS_GETHEARTBEAT:
2556 case CCISS_GETBUSTYPES:
2557 case CCISS_GETFIRMVER:
2558 case CCISS_GETDRIVVER:
2559 case CCISS_REVALIDVOLS:
2560 case CCISS_DEREGDISK:
2561 case CCISS_REGNEWDISK:
2563 case CCISS_RESCANDISK:
2564 case CCISS_GETLUNINFO:
2565 return hpsa_ioctl(dev, cmd, arg);
2567 case CCISS_PASSTHRU32:
2568 return hpsa_ioctl32_passthru(dev, cmd, arg);
2569 case CCISS_BIG_PASSTHRU32:
2570 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2572 default:
2573 return -ENOIOCTLCMD;
2574 }
2575 }
2576 #endif
2578 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2580 struct hpsa_pci_info pciinfo;
2584 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2585 pciinfo.bus = h->pdev->bus->number;
2586 pciinfo.dev_fn = h->pdev->devfn;
2587 pciinfo.board_id = h->board_id;
2588 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2589 return -EFAULT;
2590 return 0;
2591 }
2593 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2595 DriverVer_type DriverVer;
2596 unsigned char vmaj, vmin, vsubmin;
2599 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2600 &vmaj, &vmin, &vsubmin);
2601 if (rc != 3) {
2602 dev_info(&h->pdev->dev, "driver version string '%s' "
2603 "unrecognized.", HPSA_DRIVER_VERSION);
2604 vmaj = 0;
2605 vmin = 0;
2606 vsubmin = 0;
2607 }
2608 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2609 if (!argp)
2610 return -EINVAL;
2611 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2612 return -EFAULT;
2613 return 0;
2614 }
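/* (Editor's note: a worked example of the packing above -- for
 * HPSA_DRIVER_VERSION "2.0.2", sscanf() yields vmaj=2, vmin=0,
 * vsubmin=2, so DriverVer = (2 << 16) | (0 << 8) | 2 = 0x020002.) */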
2616 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2618 IOCTL_Command_struct iocommand;
2619 struct CommandList *c;
2621 union u64bit temp64;
2625 if (!capable(CAP_SYS_RAWIO))
2626 return -EPERM;
2627 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2628 return -EFAULT;
2629 if ((iocommand.buf_size < 1) &&
2630 (iocommand.Request.Type.Direction != XFER_NONE)) {
2631 return -EINVAL;
2632 }
2633 if (iocommand.buf_size > 0) {
2634 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2635 if (buff == NULL)
2636 return -EFAULT;
2637 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2638 /* Copy the data into the buffer we created */
2639 if (copy_from_user(buff, iocommand.buf,
2640 iocommand.buf_size)) {
2641 kfree(buff);
2642 return -EFAULT;
2643 }
2644 } else {
2645 memset(buff, 0, iocommand.buf_size);
2646 }
2647 }
2648 c = cmd_special_alloc(h);
2649 if (c == NULL) {
2650 kfree(buff);
2651 return -ENOMEM;
2652 }
2653 /* Fill in the command type */
2654 c->cmd_type = CMD_IOCTL_PEND;
2655 /* Fill in Command Header */
2656 c->Header.ReplyQueue = 0; /* unused in simple mode */
2657 if (iocommand.buf_size > 0) { /* buffer to fill */
2658 c->Header.SGList = 1;
2659 c->Header.SGTotal = 1;
2660 } else { /* no buffers to fill */
2661 c->Header.SGList = 0;
2662 c->Header.SGTotal = 0;
2664 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2665 /* use the kernel address of the cmd block for tag */
2666 c->Header.Tag.lower = c->busaddr;
2668 /* Fill in Request block */
2669 memcpy(&c->Request, &iocommand.Request,
2670 sizeof(c->Request));
2672 /* Fill in the scatter gather information */
2673 if (iocommand.buf_size > 0) {
2674 temp64.val = pci_map_single(h->pdev, buff,
2675 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2676 c->SG[0].Addr.lower = temp64.val32.lower;
2677 c->SG[0].Addr.upper = temp64.val32.upper;
2678 c->SG[0].Len = iocommand.buf_size;
2679 c->SG[0].Ext = 0; /* we are not chaining*/
2680 }
2681 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2682 if (iocommand.buf_size > 0)
2683 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2684 check_ioctl_unit_attention(h, c);
2686 /* Copy the error information out */
2687 memcpy(&iocommand.error_info, c->err_info,
2688 sizeof(iocommand.error_info));
2689 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2690 kfree(buff);
2691 cmd_special_free(h, c);
2692 return -EFAULT;
2693 }
2694 if (iocommand.Request.Type.Direction == XFER_READ &&
2695 iocommand.buf_size > 0) {
2696 /* Copy the data out of the buffer we created */
2697 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2698 kfree(buff);
2699 cmd_special_free(h, c);
2700 return -EFAULT;
2701 }
2702 }
2703 kfree(buff);
2704 cmd_special_free(h, c);
2705 return 0;
2706 }
2708 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2710 BIG_IOCTL_Command_struct *ioc;
2711 struct CommandList *c;
2712 unsigned char **buff = NULL;
2713 int *buff_size = NULL;
2714 union u64bit temp64;
2715 BYTE sg_used = 0;
2716 int status = 0;
2717 int i;
2718 u32 left;
2719 u32 sz;
2720 BYTE __user *data_ptr;
2724 if (!capable(CAP_SYS_RAWIO))
2725 return -EPERM;
2726 ioc = (BIG_IOCTL_Command_struct *)
2727 kmalloc(sizeof(*ioc), GFP_KERNEL);
2728 if (!ioc) {
2729 status = -ENOMEM;
2730 goto cleanup1;
2731 }
2732 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2733 status = -EFAULT;
2734 goto cleanup1;
2735 }
2736 if ((ioc->buf_size < 1) &&
2737 (ioc->Request.Type.Direction != XFER_NONE)) {
2738 status = -EINVAL;
2739 goto cleanup1;
2740 }
2741 /* Check kmalloc limits using all SGs */
2742 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2743 status = -EINVAL;
2744 goto cleanup1;
2745 }
2746 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2747 status = -EINVAL;
2748 goto cleanup1;
2749 }
2750 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2751 if (!buff) {
2752 status = -ENOMEM;
2753 goto cleanup1;
2754 }
2755 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2756 if (!buff_size) {
2757 status = -ENOMEM;
2758 goto cleanup1;
2759 }
2760 left = ioc->buf_size;
2761 data_ptr = ioc->buf;
2762 while (left) {
2763 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
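/* (Editor's note: e.g. with buf_size = 100 KiB and malloc_size =
 * 64 KiB, this loop produces two chunks of 64 KiB and 36 KiB, each
 * kmalloc'ed separately and later mapped as its own SG entry.) */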
2764 buff_size[sg_used] = sz;
2765 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2766 if (buff[sg_used] == NULL) {
2767 status = -ENOMEM;
2768 goto cleanup1;
2769 }
2770 if (ioc->Request.Type.Direction == XFER_WRITE) {
2771 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2772 status = -EFAULT;
2773 goto cleanup1;
2774 }
2775 } else
2776 memset(buff[sg_used], 0, sz);
2777 left -= sz;
2778 data_ptr += sz;
2779 sg_used++;
2780 }
2781 c = cmd_special_alloc(h);
2782 if (c == NULL) {
2783 status = -ENOMEM;
2784 goto cleanup1;
2785 }
2786 c->cmd_type = CMD_IOCTL_PEND;
2787 c->Header.ReplyQueue = 0;
2788 c->Header.SGList = c->Header.SGTotal = sg_used;
2789 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2790 c->Header.Tag.lower = c->busaddr;
2791 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2792 if (ioc->buf_size > 0) {
2794 for (i = 0; i < sg_used; i++) {
2795 temp64.val = pci_map_single(h->pdev, buff[i],
2796 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2797 c->SG[i].Addr.lower = temp64.val32.lower;
2798 c->SG[i].Addr.upper = temp64.val32.upper;
2799 c->SG[i].Len = buff_size[i];
2800 /* we are not chaining */
2801 c->SG[i].Ext = 0;
2802 }
2803 }
2804 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2805 if (ioc->buf_size > 0)
2806 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2807 check_ioctl_unit_attention(h, c);
2808 /* Copy the error information out */
2809 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2810 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2811 cmd_special_free(h, c);
2812 status = -EFAULT;
2813 goto cleanup1;
2814 }
2815 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
2816 /* Copy the data out of the buffer we created */
2817 BYTE __user *ptr = ioc->buf;
2818 for (i = 0; i < sg_used; i++) {
2819 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2820 cmd_special_free(h, c);
2821 status = -EFAULT;
2822 goto cleanup1;
2823 }
2824 ptr += buff_size[i];
2825 }
2826 }
2827 cmd_special_free(h, c);
2828 status = 0;
2829 cleanup1:
2830 if (buff) {
2831 for (i = 0; i < sg_used; i++)
2832 kfree(buff[i]);
2833 kfree(buff);
2834 }
2835 kfree(buff_size);
2836 kfree(ioc);
2837 return status;
2838 }
2840 static void check_ioctl_unit_attention(struct ctlr_info *h,
2841 struct CommandList *c)
2843 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2844 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2845 (void) check_for_unit_attention(h, c);
2850 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2852 struct ctlr_info *h;
2853 void __user *argp = (void __user *)arg;
2855 h = sdev_to_hba(dev);
2857 switch (cmd) {
2858 case CCISS_DEREGDISK:
2859 case CCISS_REGNEWDISK:
2860 case CCISS_REGNEWD:
2861 hpsa_scan_start(h->scsi_host);
2862 return 0;
2863 case CCISS_GETPCIINFO:
2864 return hpsa_getpciinfo_ioctl(h, argp);
2865 case CCISS_GETDRIVVER:
2866 return hpsa_getdrivver_ioctl(h, argp);
2867 case CCISS_PASSTHRU:
2868 return hpsa_passthru_ioctl(h, argp);
2869 case CCISS_BIG_PASSTHRU:
2870 return hpsa_big_passthru_ioctl(h, argp);
2871 default:
2872 return -ENOTTY;
2873 }
2874 }
2876 static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
2877 unsigned char *scsi3addr, u8 reset_type)
2879 struct CommandList *c;
2881 c = cmd_alloc(h);
2882 if (!c)
2883 return -ENOMEM;
2884 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2885 RAID_CTLR_LUNID, TYPE_MSG);
2886 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2888 enqueue_cmd_and_start_io(h, c);
2889 /* Don't wait for completion, the reset won't complete. Don't free
2890 * the command either. This is the last command we will send before
2891 * re-initializing everything, so it doesn't matter and won't leak.
2892 */
2893 return 0;
2894 }
2896 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2897 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2898 int cmd_type)
2899 {
2900 int pci_dir = XFER_NONE;
2902 c->cmd_type = CMD_IOCTL_PEND;
2903 c->Header.ReplyQueue = 0;
2904 if (buff != NULL && size > 0) {
2905 c->Header.SGList = 1;
2906 c->Header.SGTotal = 1;
2907 } else {
2908 c->Header.SGList = 0;
2909 c->Header.SGTotal = 0;
2910 }
2911 c->Header.Tag.lower = c->busaddr;
2912 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2914 c->Request.Type.Type = cmd_type;
2915 if (cmd_type == TYPE_CMD) {
2916 switch (cmd) {
2917 case HPSA_INQUIRY:
2918 /* are we trying to read a vital product page */
2919 if (page_code != 0) {
2920 c->Request.CDB[1] = 0x01;
2921 c->Request.CDB[2] = page_code;
2922 }
2923 c->Request.CDBLen = 6;
2924 c->Request.Type.Attribute = ATTR_SIMPLE;
2925 c->Request.Type.Direction = XFER_READ;
2926 c->Request.Timeout = 0;
2927 c->Request.CDB[0] = HPSA_INQUIRY;
2928 c->Request.CDB[4] = size & 0xFF;
2929 break;
2930 case HPSA_REPORT_LOG:
2931 case HPSA_REPORT_PHYS:
2932 /* Talking to the controller, so it's a physical command:
2933 mode = 00, target = 0. Nothing to write.
2934 */
2935 c->Request.CDBLen = 12;
2936 c->Request.Type.Attribute = ATTR_SIMPLE;
2937 c->Request.Type.Direction = XFER_READ;
2938 c->Request.Timeout = 0;
2939 c->Request.CDB[0] = cmd;
2940 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2941 c->Request.CDB[7] = (size >> 16) & 0xFF;
2942 c->Request.CDB[8] = (size >> 8) & 0xFF;
2943 c->Request.CDB[9] = size & 0xFF;
2944 break;
2945 case HPSA_CACHE_FLUSH:
2946 c->Request.CDBLen = 12;
2947 c->Request.Type.Attribute = ATTR_SIMPLE;
2948 c->Request.Type.Direction = XFER_WRITE;
2949 c->Request.Timeout = 0;
2950 c->Request.CDB[0] = BMIC_WRITE;
2951 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2952 c->Request.CDB[7] = (size >> 8) & 0xFF;
2953 c->Request.CDB[8] = size & 0xFF;
2954 break;
2955 case TEST_UNIT_READY:
2956 c->Request.CDBLen = 6;
2957 c->Request.Type.Attribute = ATTR_SIMPLE;
2958 c->Request.Type.Direction = XFER_NONE;
2959 c->Request.Timeout = 0;
2960 break;
2961 default:
2962 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2963 BUG();
2964 return;
2965 }
2966 } else if (cmd_type == TYPE_MSG) {
2968 switch (cmd) {
2969 case HPSA_DEVICE_RESET_MSG:
2970 c->Request.CDBLen = 16;
2971 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2972 c->Request.Type.Attribute = ATTR_SIMPLE;
2973 c->Request.Type.Direction = XFER_NONE;
2974 c->Request.Timeout = 0; /* Don't time out */
2975 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2976 c->Request.CDB[0] = cmd;
2977 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
2978 /* If bytes 4-7 are zero, it means reset the */
2980 c->Request.CDB[4] = 0x00;
2981 c->Request.CDB[5] = 0x00;
2982 c->Request.CDB[6] = 0x00;
2983 c->Request.CDB[7] = 0x00;
2987 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2988 cmd);
2989 BUG();
2990 }
2991 } else {
2992 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2993 BUG();
2994 }
2996 switch (c->Request.Type.Direction) {
2997 case XFER_READ:
2998 pci_dir = PCI_DMA_FROMDEVICE;
2999 break;
3000 case XFER_WRITE:
3001 pci_dir = PCI_DMA_TODEVICE;
3002 break;
3003 case XFER_NONE:
3004 pci_dir = PCI_DMA_NONE;
3005 break;
3006 default:
3007 pci_dir = PCI_DMA_BIDIRECTIONAL;
3008 }
3010 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
3011 }
3015 /*
3016 * Map (physical) PCI mem into (virtual) kernel space
3017 */
3018 static void __iomem *remap_pci_mem(ulong base, ulong size)
3020 ulong page_base = ((ulong) base) & PAGE_MASK;
3021 ulong page_offs = ((ulong) base) - page_base;
3022 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
3024 return page_remapped ? (page_remapped + page_offs) : NULL;
3027 /* Takes cmds off the submission queue and sends them to the hardware,
3028 * then puts them on the queue of cmds waiting for completion.
3030 static void start_io(struct ctlr_info *h)
3032 struct CommandList *c;
3034 while (!list_empty(&h->reqQ)) {
3035 c = list_entry(h->reqQ.next, struct CommandList, list);
3036 /* can't do anything if fifo is full */
3037 if ((h->access.fifo_full(h))) {
3038 dev_warn(&h->pdev->dev, "fifo full\n");
3039 break;
3040 }
3042 /* Get the first entry from the Request Q */
3043 removeQ(c);
3044 h->Qdepth--;
3046 /* Tell the controller execute command */
3047 h->access.submit_command(h, c);
3049 /* Put job onto the completed Q */
3050 addQ(&h->cmpQ, c);
3051 }
3052 }
3054 static inline unsigned long get_next_completion(struct ctlr_info *h)
3056 return h->access.command_completed(h);
3059 static inline bool interrupt_pending(struct ctlr_info *h)
3061 return h->access.intr_pending(h);
3064 static inline long interrupt_not_for_us(struct ctlr_info *h)
3066 return (h->access.intr_pending(h) == 0) ||
3067 (h->interrupts_enabled == 0);
3070 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
3071 u32 raw_tag)
3072 {
3073 if (unlikely(tag_index >= h->nr_cmds)) {
3074 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
3075 return 1;
3076 }
3077 return 0;
3078 }
3080 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
3083 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
3084 if (likely(c->cmd_type == CMD_SCSI))
3085 complete_scsi_command(c);
3086 else if (c->cmd_type == CMD_IOCTL_PEND)
3087 complete(c->waiting);
3090 static inline u32 hpsa_tag_contains_index(u32 tag)
3092 return tag & DIRECT_LOOKUP_BIT;
3095 static inline u32 hpsa_tag_to_index(u32 tag)
3097 return tag >> DIRECT_LOOKUP_SHIFT;
3101 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3103 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3104 #define HPSA_SIMPLE_ERROR_BITS 0x03
3105 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3106 return tag & ~HPSA_SIMPLE_ERROR_BITS;
3107 return tag & ~HPSA_PERF_ERROR_BITS;
3108 }
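/* (Editor's note: completed tags come back with low-order status bits
 * set by the hardware -- 2 bits in simple mode, DIRECT_LOOKUP_SHIFT
 * bits in performant mode -- so they must be masked off before the tag
 * can be compared against a command's bus address or index.) */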
3110 /* process completion of an indexed ("direct lookup") command */
3111 static inline u32 process_indexed_cmd(struct ctlr_info *h,
3115 struct CommandList *c;
3117 tag_index = hpsa_tag_to_index(raw_tag);
3118 if (bad_tag(h, tag_index, raw_tag))
3119 return next_command(h);
3120 c = h->cmd_pool + tag_index;
3121 finish_cmd(c, raw_tag);
3122 return next_command(h);
3125 /* process completion of a non-indexed command */
3126 static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
3130 struct CommandList *c = NULL;
3132 tag = hpsa_tag_discard_error_bits(h, raw_tag);
3133 list_for_each_entry(c, &h->cmpQ, list) {
3134 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3135 finish_cmd(c, raw_tag);
3136 return next_command(h);
3137 }
3138 }
3139 bad_tag(h, h->nr_cmds + 1, raw_tag);
3140 return next_command(h);
3143 /* Some controllers, like p400, will give us one interrupt
3144 * after a soft reset, even if we turned interrupts off.
3145 * Only need to check for this in the hpsa_xxx_discard_completions
3148 static int ignore_bogus_interrupt(struct ctlr_info *h)
3150 if (likely(!reset_devices))
3151 return 0;
3153 if (likely(h->interrupts_enabled))
3154 return 0;
3156 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3157 "(known firmware bug.) Ignoring.\n");
3158 return 1;
3159 }
3162 static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
3164 struct ctlr_info *h = dev_id;
3165 unsigned long flags;
3168 if (ignore_bogus_interrupt(h))
3169 return IRQ_NONE;
3171 if (interrupt_not_for_us(h))
3172 return IRQ_NONE;
3173 spin_lock_irqsave(&h->lock, flags);
3174 h->last_intr_timestamp = get_jiffies_64();
3175 while (interrupt_pending(h)) {
3176 raw_tag = get_next_completion(h);
3177 while (raw_tag != FIFO_EMPTY)
3178 raw_tag = next_command(h);
3180 spin_unlock_irqrestore(&h->lock, flags);
3181 return IRQ_HANDLED;
3182 }
3184 static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
3186 struct ctlr_info *h = dev_id;
3187 unsigned long flags;
3190 if (ignore_bogus_interrupt(h))
3191 return IRQ_NONE;
3193 spin_lock_irqsave(&h->lock, flags);
3194 h->last_intr_timestamp = get_jiffies_64();
3195 raw_tag = get_next_completion(h);
3196 while (raw_tag != FIFO_EMPTY)
3197 raw_tag = next_command(h);
3198 spin_unlock_irqrestore(&h->lock, flags);
3199 return IRQ_HANDLED;
3200 }
3202 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
3204 struct ctlr_info *h = dev_id;
3205 unsigned long flags;
3208 if (interrupt_not_for_us(h))
3209 return IRQ_NONE;
3210 spin_lock_irqsave(&h->lock, flags);
3211 h->last_intr_timestamp = get_jiffies_64();
3212 while (interrupt_pending(h)) {
3213 raw_tag = get_next_completion(h);
3214 while (raw_tag != FIFO_EMPTY) {
3215 if (hpsa_tag_contains_index(raw_tag))
3216 raw_tag = process_indexed_cmd(h, raw_tag);
3217 else
3218 raw_tag = process_nonindexed_cmd(h, raw_tag);
3219 }
3220 }
3221 spin_unlock_irqrestore(&h->lock, flags);
3222 return IRQ_HANDLED;
3223 }
3225 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
3227 struct ctlr_info *h = dev_id;
3228 unsigned long flags;
3231 spin_lock_irqsave(&h->lock, flags);
3232 h->last_intr_timestamp = get_jiffies_64();
3233 raw_tag = get_next_completion(h);
3234 while (raw_tag != FIFO_EMPTY) {
3235 if (hpsa_tag_contains_index(raw_tag))
3236 raw_tag = process_indexed_cmd(h, raw_tag);
3237 else
3238 raw_tag = process_nonindexed_cmd(h, raw_tag);
3239 }
3240 spin_unlock_irqrestore(&h->lock, flags);
3241 return IRQ_HANDLED;
3242 }
3244 /* Send a message CDB to the firmware. Careful, this only works
3245 * in simple mode, not performant mode due to the tag lookup.
3246 * We only ever use this immediately after a controller reset.
3248 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3249 unsigned char type)
3250 {
3251 struct Command {
3252 struct CommandListHeader CommandHeader;
3253 struct RequestBlock Request;
3254 struct ErrDescriptor ErrorDescriptor;
3255 };
3256 struct Command *cmd;
3257 static const size_t cmd_sz = sizeof(*cmd) +
3258 sizeof(cmd->ErrorDescriptor);
3260 uint32_t paddr32, tag;
3261 void __iomem *vaddr;
3264 vaddr = pci_ioremap_bar(pdev, 0);
3268 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3269 * CCISS commands, so they must be allocated from the lower 4GiB of
3272 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3278 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3284 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3285 * although there's no guarantee, we assume that the address is at
3286 * least 4-byte aligned (most likely, it's page-aligned).
3287 */
3288 paddr32 = paddr64;
3290 cmd->CommandHeader.ReplyQueue = 0;
3291 cmd->CommandHeader.SGList = 0;
3292 cmd->CommandHeader.SGTotal = 0;
3293 cmd->CommandHeader.Tag.lower = paddr32;
3294 cmd->CommandHeader.Tag.upper = 0;
3295 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3297 cmd->Request.CDBLen = 16;
3298 cmd->Request.Type.Type = TYPE_MSG;
3299 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3300 cmd->Request.Type.Direction = XFER_NONE;
3301 cmd->Request.Timeout = 0; /* Don't time out */
3302 cmd->Request.CDB[0] = opcode;
3303 cmd->Request.CDB[1] = type;
3304 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3305 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3306 cmd->ErrorDescriptor.Addr.upper = 0;
3307 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3309 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3311 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3312 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3313 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3314 break;
3315 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3316 }
3318 iounmap(vaddr);
3320 /* we leak the DMA buffer here ... no choice since the controller could
3321 * still complete the command.
3323 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3324 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3325 opcode, type);
3326 return -ETIMEDOUT;
3327 }
3329 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3331 if (tag & HPSA_ERROR_BIT) {
3332 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3333 opcode, type);
3334 return -EIO;
3335 }
3337 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3338 opcode, type);
3339 return 0;
3340 }
3342 #define hpsa_noop(p) hpsa_message(p, 3, 0)
3344 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3345 void * __iomem vaddr, u32 use_doorbell)
3346 {
3347 u16 pmcsr;
3348 int pos;
3350 if (use_doorbell) {
3351 /* For everything after the P600, the PCI power state method
3352 * of resetting the controller doesn't work, so we have this
3353 * other way using the doorbell register.
3355 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3356 writel(use_doorbell, vaddr + SA5_DOORBELL);
3357 } else { /* Try to do it the PCI power state way */
3359 /* Quoting from the Open CISS Specification: "The Power
3360 * Management Control/Status Register (CSR) controls the power
3361 * state of the device. The normal operating state is D0,
3362 * CSR=00h. The software off state is D3, CSR=03h. To reset
3363 * the controller, place the interface device in D3 then to D0,
3364 * this causes a secondary PCI reset which will reset the
3367 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3368 if (pos == 0) {
3369 dev_err(&pdev->dev,
3370 "hpsa_reset_controller: "
3371 "PCI PM not supported\n");
3372 return -ENODEV;
3373 }
3374 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3375 /* enter the D3hot power management state */
3376 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3377 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3378 pmcsr |= PCI_D3hot;
3379 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3383 /* enter the D0 power management state */
3384 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3385 pmcsr |= PCI_D0;
3386 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3389 * The P600 requires a small delay when changing states.
3390 * Otherwise we may think the board did not reset and we bail.
3391 * This is for kdump only and is particular to the P600.
3392 */
3393 msleep(500);
3394 }
3395 return 0;
3396 }
3398 static __devinit void init_driver_version(char *driver_version, int len)
3400 memset(driver_version, 0, len);
3401 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
3402 }
3404 static __devinit int write_driver_ver_to_cfgtable(
3405 struct CfgTable __iomem *cfgtable)
3407 char *driver_version;
3408 int i, size = sizeof(cfgtable->driver_version);
3410 driver_version = kmalloc(size, GFP_KERNEL);
3411 if (!driver_version)
3412 return -ENOMEM;
3414 init_driver_version(driver_version, size);
3415 for (i = 0; i < size; i++)
3416 writeb(driver_version[i], &cfgtable->driver_version[i]);
3417 kfree(driver_version);
3418 return 0;
3419 }
3421 static __devinit void read_driver_ver_from_cfgtable(
3422 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3426 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3427 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3430 static __devinit int controller_reset_failed(
3431 struct CfgTable __iomem *cfgtable)
3434 char *driver_ver, *old_driver_ver;
3435 int rc, size = sizeof(cfgtable->driver_version);
3437 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3438 if (!old_driver_ver)
3439 return -ENOMEM;
3440 driver_ver = old_driver_ver + size;
3442 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3443 * should have been changed, otherwise we know the reset failed.
3445 init_driver_version(old_driver_ver, size);
3446 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3447 rc = !memcmp(driver_ver, old_driver_ver, size);
3448 kfree(old_driver_ver);
3449 return rc;
3450 }
3451 /* This does a hard reset of the controller using PCI power management
3452 * states or the using the doorbell register.
3454 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3458 u64 cfg_base_addr_index;
3459 void __iomem *vaddr;
3460 unsigned long paddr;
3461 u32 misc_fw_support;
3463 struct CfgTable __iomem *cfgtable;
3466 u16 command_register;
3468 /* For controllers as old as the P600, this is very nearly
3469 * the same thing as
3470 *
3471 * pci_save_state(pci_dev);
3472 * pci_set_power_state(pci_dev, PCI_D3hot);
3473 * pci_set_power_state(pci_dev, PCI_D0);
3474 * pci_restore_state(pci_dev);
3476 * For controllers newer than the P600, the pci power state
3477 * method of resetting doesn't work so we have another way
3478 * using the doorbell register.
3479 */
3481 rc = hpsa_lookup_board_id(pdev, &board_id);
3482 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3483 dev_warn(&pdev->dev, "Not resetting device.\n");
3484 return -ENODEV;
3485 }
3487 /* if controller is soft- but not hard resettable... */
3488 if (!ctlr_is_hard_resettable(board_id))
3489 return -ENOTSUPP; /* try soft reset later. */
3491 /* Save the PCI command register */
3492 pci_read_config_word(pdev, 4, &command_register);
3493 /* Turn the board off. This is so that later pci_restore_state()
3494 * won't turn the board on before the rest of config space is ready.
3496 pci_disable_device(pdev);
3497 pci_save_state(pdev);
3499 /* find the first memory BAR, so we can find the cfg table */
3500 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3501 if (rc)
3502 return rc;
3503 vaddr = remap_pci_mem(paddr, 0x250);
3504 if (!vaddr)
3505 return -ENOMEM;
3507 /* find cfgtable in order to check if reset via doorbell is supported */
3508 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3509 &cfg_base_addr_index, &cfg_offset);
3512 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3513 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3518 rc = write_driver_ver_to_cfgtable(cfgtable);
3522 /* If reset via doorbell register is supported, use that.
3523 * There are two such methods. Favor the newest method.
3525 misc_fw_support = readl(&cfgtable->misc_fw_support);
3526 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3527 if (use_doorbell) {
3528 use_doorbell = DOORBELL_CTLR_RESET2;
3529 } else {
3530 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3531 if (use_doorbell) {
3532 dev_warn(&pdev->dev, "Soft reset not supported. "
3533 "Firmware update is required.\n");
3534 rc = -ENOTSUPP; /* try soft reset */
3535 goto unmap_cfgtable;
3536 }
3537 }
3539 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3540 if (rc)
3541 goto unmap_cfgtable;
3543 pci_restore_state(pdev);
3544 rc = pci_enable_device(pdev);
3545 if (rc) {
3546 dev_warn(&pdev->dev, "failed to enable device.\n");
3547 goto unmap_cfgtable;
3548 }
3549 pci_write_config_word(pdev, 4, command_register);
3551 /* Some devices (notably the HP Smart Array 5i Controller)
3552 need a little pause here */
3553 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3555 /* Wait for board to become not ready, then ready. */
3556 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3557 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3558 if (rc) {
3559 dev_warn(&pdev->dev,
3560 "failed waiting for board to reset."
3561 " Will try soft reset.\n");
3562 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3563 goto unmap_cfgtable;
3564 }
3565 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3566 if (rc) {
3567 dev_warn(&pdev->dev,
3568 "failed waiting for board to become ready "
3569 "after hard reset\n");
3570 goto unmap_cfgtable;
3571 }
3573 rc = controller_reset_failed(cfgtable);
3574 if (rc < 0)
3575 goto unmap_cfgtable;
3576 if (rc) {
3577 dev_warn(&pdev->dev, "Unable to successfully reset "
3578 "controller. Will try soft reset.\n");
3579 rc = -ENOTSUPP;
3580 } else {
3581 dev_info(&pdev->dev, "board ready after hard reset.\n");
3582 }
3584 unmap_cfgtable:
3585 iounmap(cfgtable);
3587 unmap_vaddr:
3588 iounmap(vaddr);
3589 return rc;
3590 }
3593 * We cannot read the structure directly, for portability we must use
3594 * the io functions.
3595 * This is for debug only.
3596 */
3597 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3598 {
3599 #ifdef HPSA_DEBUG
3600 int i;
3601 char temp_name[17];
3603 dev_info(dev, "Controller Configuration information\n");
3604 dev_info(dev, "------------------------------------\n");
3605 for (i = 0; i < 4; i++)
3606 temp_name[i] = readb(&(tb->Signature[i]));
3607 temp_name[4] = '\0';
3608 dev_info(dev, " Signature = %s\n", temp_name);
3609 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3610 dev_info(dev, " Transport methods supported = 0x%x\n",
3611 readl(&(tb->TransportSupport)));
3612 dev_info(dev, " Transport methods active = 0x%x\n",
3613 readl(&(tb->TransportActive)));
3614 dev_info(dev, " Requested transport Method = 0x%x\n",
3615 readl(&(tb->HostWrite.TransportRequest)));
3616 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3617 readl(&(tb->HostWrite.CoalIntDelay)));
3618 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3619 readl(&(tb->HostWrite.CoalIntCount)));
3620 dev_info(dev, " Max outstanding commands = %d\n",
3621 readl(&(tb->CmdsOutMax)));
3622 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3623 for (i = 0; i < 16; i++)
3624 temp_name[i] = readb(&(tb->ServerName[i]));
3625 temp_name[16] = '\0';
3626 dev_info(dev, " Server Name = %s\n", temp_name);
3627 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3628 readl(&(tb->HeartBeat)));
3629 #endif /* HPSA_DEBUG */
3630 }
3632 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3634 int i, offset, mem_type, bar_type;
3636 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3637 return 0;
3638 offset = 0;
3639 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3640 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3641 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3642 offset += 4;
3643 else {
3644 mem_type = pci_resource_flags(pdev, i) &
3645 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3646 switch (mem_type) {
3647 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3648 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3649 offset += 4; /* 32 bit */
3650 break;
3651 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3652 offset += 8;
3653 break;
3654 default: /* reserved in PCI 2.2 */
3655 dev_warn(&pdev->dev,
3656 "base address is invalid\n");
3661 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3662 return i + 1;
3663 }
3664 return -1;
3665 }
3667 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3668 * controllers that are capable. If not, we use IO-APIC mode.
3671 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3673 #ifdef CONFIG_PCI_MSI
3675 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3679 /* Some boards advertise MSI but don't really support it */
3680 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3681 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3682 goto default_int_mode;
3683 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3684 dev_info(&h->pdev->dev, "MSIX\n");
3685 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3686 if (!err) {
3687 h->intr[0] = hpsa_msix_entries[0].vector;
3688 h->intr[1] = hpsa_msix_entries[1].vector;
3689 h->intr[2] = hpsa_msix_entries[2].vector;
3690 h->intr[3] = hpsa_msix_entries[3].vector;
3691 h->msix_vector = 1;
3692 return;
3693 }
3694 if (err > 0) {
3695 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3696 "available\n", err);
3697 goto default_int_mode;
3698 } else {
3699 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3700 err);
3701 goto default_int_mode;
3702 }
3703 }
3704 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3705 dev_info(&h->pdev->dev, "MSI\n");
3706 if (!pci_enable_msi(h->pdev))
3709 dev_warn(&h->pdev->dev, "MSI init failed\n");
3712 #endif /* CONFIG_PCI_MSI */
3713 /* if we get here we're going to use the default interrupt mode */
3714 h->intr[h->intr_mode] = h->pdev->irq;
3715 }
3717 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3720 u32 subsystem_vendor_id, subsystem_device_id;
3722 subsystem_vendor_id = pdev->subsystem_vendor;
3723 subsystem_device_id = pdev->subsystem_device;
3724 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3725 subsystem_vendor_id;
3727 for (i = 0; i < ARRAY_SIZE(products); i++)
3728 if (*board_id == products[i].board_id)
3729 return i;
3731 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3732 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3733 !hpsa_allow_any) {
3734 dev_warn(&pdev->dev, "unrecognized board ID: "
3735 "0x%08x, ignoring.\n", *board_id);
3736 return -ENODEV;
3737 }
3738 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3739 }
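/* (Editor's note: the board_id above is the PCI subsystem device ID
 * in the high 16 bits and the subsystem vendor ID in the low 16 bits.
 * For example, subsystem 0x103C:0x3241 (vendor:device) yields
 * board_id 0x3241103C -- the format used in the products[] table.) */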
3741 static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3745 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3746 return ((command & PCI_COMMAND_MEMORY) == 0);
3749 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3750 unsigned long *memory_bar)
3754 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3755 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3756 /* addressing mode bits already removed */
3757 *memory_bar = pci_resource_start(pdev, i);
3758 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3759 *memory_bar);
3760 return 0;
3761 }
3762 dev_warn(&pdev->dev, "no memory BAR found\n");
3763 return -ENODEV;
3764 }
3766 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
3767 void __iomem *vaddr, int wait_for_ready)
3772 iterations = HPSA_BOARD_READY_ITERATIONS;
3773 else
3774 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
3776 for (i = 0; i < iterations; i++) {
3777 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3778 if (wait_for_ready) {
3779 if (scratchpad == HPSA_FIRMWARE_READY)
3780 return 0;
3781 } else {
3782 if (scratchpad != HPSA_FIRMWARE_READY)
3783 return 0;
3784 }
3785 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3787 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3788 return -ENODEV;
3789 }
3791 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3792 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3795 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3796 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3797 *cfg_base_addr &= (u32) 0x0000ffff;
3798 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3799 if (*cfg_base_addr_index == -1) {
3800 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3801 return -ENODEV;
3802 }
3803 return 0;
3804 }
3806 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3810 u64 cfg_base_addr_index;
3814 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3815 &cfg_base_addr_index, &cfg_offset);
3816 if (rc)
3817 return rc;
3818 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3819 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3820 if (!h->cfgtable)
3821 return -ENOMEM;
3822 rc = write_driver_ver_to_cfgtable(h->cfgtable);
3823 if (rc)
3824 return rc;
3825 /* Find performant mode table. */
3826 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3827 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3828 cfg_base_addr_index)+cfg_offset+trans_offset,
3829 sizeof(*h->transtable));
3830 if (!h->transtable)
3831 return -ENOMEM;
3832 return 0;
3833 }
3835 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3837 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3839 /* Limit commands in memory limited kdump scenario. */
3840 if (reset_devices && h->max_commands > 32)
3841 h->max_commands = 32;
3843 if (h->max_commands < 16) {
3844 dev_warn(&h->pdev->dev, "Controller reports "
3845 "max supported commands of %d, an obvious lie. "
3846 "Using 16. Ensure that firmware is up to date.\n",
3847 h->max_commands);
3848 h->max_commands = 16;
3849 }
3850 }
3852 /* Interrogate the hardware for some limits:
3853 * max commands, max SG elements without chaining, and with chaining,
3854 * SG chain block size, etc.
3855 */
3856 static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3857 {
3858 hpsa_get_max_perf_mode_cmds(h);
3859 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3860 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3861 /*
3862 * Limit in-command s/g elements to 32, to save DMA'able memory.
3863 * However, the spec says if 0, use 31.
3864 */
3865 h->max_cmd_sg_entries = 31;
3866 if (h->maxsgentries > 512) {
3867 h->max_cmd_sg_entries = 32;
3868 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3869 h->maxsgentries--; /* save one for chain pointer */
3870 } else {
3871 h->maxsgentries = 31; /* default to traditional values */
3872 h->chainsize = 0;
3873 }
3874 }
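/* (Editor's note: a worked example -- a controller reporting
 * maxsgentries = 1024 gets max_cmd_sg_entries = 32, a chain block of
 * 1024 - 32 + 1 = 993 entries, and advertises 1023 usable entries
 * after reserving one slot for the chain pointer.) */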
3876 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3878 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3879 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3880 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3881 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3882 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3883 return false;
3884 }
3885 return true;
3886 }
3888 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3889 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3894 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3896 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3900 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
3901 * in a prefetch beyond physical memory.
3903 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3904 {
3905 u32 dma_prefetch;
3907 if (h->board_id != 0x3225103C)
3908 return;
3909 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3910 dma_prefetch |= 0x8000;
3911 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3914 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3918 unsigned long flags;
3920 /* under certain very rare conditions, this can take awhile.
3921 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3922 * as we enter this code.)
3923 */
3924 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3925 spin_lock_irqsave(&h->lock, flags);
3926 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
3927 spin_unlock_irqrestore(&h->lock, flags);
3928 if (!(doorbell_value & CFGTBL_ChangeReq))
3929 break;
3930 /* delay and try again */
3931 usleep_range(10000, 20000);
3932 }
3933 }
3935 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3939 trans_support = readl(&(h->cfgtable->TransportSupport));
3940 if (!(trans_support & SIMPLE_MODE))
3941 return -ENOTSUPP;
3943 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3944 /* Update the field, and then ring the doorbell */
3945 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3946 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3947 hpsa_wait_for_mode_change_ack(h);
3948 print_cfg_table(&h->pdev->dev, h->cfgtable);
3949 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3950 dev_warn(&h->pdev->dev,
3951 "unable to get board into simple mode\n");
3952 return -ENODEV;
3953 }
3954 h->transMethod = CFGTBL_Trans_Simple;
3955 return 0;
3956 }
3958 static int __devinit hpsa_pci_init(struct ctlr_info *h)
3960 int prod_index, err;
3962 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3963 if (prod_index < 0)
3964 return -ENODEV;
3965 h->product_name = products[prod_index].product_name;
3966 h->access = *(products[prod_index].access);
3968 if (hpsa_board_disabled(h->pdev)) {
3969 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3970 return -ENODEV;
3971 }
3973 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
3974 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
3976 err = pci_enable_device(h->pdev);
3977 if (err) {
3978 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3979 return err;
3980 }
3982 err = pci_request_regions(h->pdev, "hpsa");
3983 if (err) {
3984 dev_err(&h->pdev->dev,
3985 "cannot obtain PCI resources, aborting\n");
3986 return err;
3987 }
3988 hpsa_interrupt_mode(h);
3989 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3990 if (err)
3991 goto err_out_free_res;
3992 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3993 if (!h->vaddr) {
3994 err = -ENOMEM;
3995 goto err_out_free_res;
3996 }
3997 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
3998 if (err)
3999 goto err_out_free_res;
4000 err = hpsa_find_cfgtables(h);
4001 if (err)
4002 goto err_out_free_res;
4003 hpsa_find_board_params(h);
4005 if (!hpsa_CISS_signature_present(h)) {
4006 err = -ENODEV;
4007 goto err_out_free_res;
4008 }
4009 hpsa_enable_scsi_prefetch(h);
4010 hpsa_p600_dma_prefetch_quirk(h);
4011 err = hpsa_enter_simple_mode(h);
4012 if (err)
4013 goto err_out_free_res;
4014 return 0;
4016 err_out_free_res:
4017 if (h->transtable)
4018 iounmap(h->transtable);
4019 if (h->cfgtable)
4020 iounmap(h->cfgtable);
4021 if (h->vaddr)
4022 iounmap(h->vaddr);
4023 /*
4024 * Deliberately omit pci_disable_device(): it does something nasty to
4025 * Smart Array controllers that pci_enable_device does not undo
4026 */
4027 pci_release_regions(h->pdev);
4028 return err;
4029 }
4031 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
4035 #define HBA_INQUIRY_BYTE_COUNT 64
4036 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
4037 if (!h->hba_inquiry_data)
4038 return;
4039 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
4040 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
4041 if (rc != 0) {
4042 kfree(h->hba_inquiry_data);
4043 h->hba_inquiry_data = NULL;
4044 }
4045 }
4047 static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
4054 /* Reset the controller with a PCI power-cycle or via doorbell */
4055 rc = hpsa_kdump_hard_reset_controller(pdev);
4057 /* -ENOTSUPP here means we cannot reset the controller
4058 * but it's already (and still) up and running in
4059 * "performant mode". Or, it might be 640x, which can't reset
4060 * due to concerns about shared bbwc between 6402/6404 pair.
4062 if (rc == -ENOTSUPP)
4063 return rc; /* just try to do the kdump anyhow. */
4064 if (rc)
4065 return -ENODEV;
4067 /* Now try to get the controller to respond to a no-op */
4068 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4069 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
4070 if (hpsa_noop(pdev) == 0)
4071 break;
4073 dev_warn(&pdev->dev, "no-op failed%s\n",
4074 (i < 11 ? "; re-trying" : ""));
4079 static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
4081 h->cmd_pool_bits = kzalloc(
4082 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
4083 sizeof(unsigned long), GFP_KERNEL);
4084 h->cmd_pool = pci_alloc_consistent(h->pdev,
4085 h->nr_cmds * sizeof(*h->cmd_pool),
4086 &(h->cmd_pool_dhandle));
4087 h->errinfo_pool = pci_alloc_consistent(h->pdev,
4088 h->nr_cmds * sizeof(*h->errinfo_pool),
4089 &(h->errinfo_pool_dhandle));
4090 if ((h->cmd_pool_bits == NULL)
4091 || (h->cmd_pool == NULL)
4092 || (h->errinfo_pool == NULL)) {
4093 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
4094 return -ENOMEM;
4095 }
4096 return 0;
4097 }
4099 static void hpsa_free_cmd_pool(struct ctlr_info *h)
4101 kfree(h->cmd_pool_bits);
4102 if (h->cmd_pool)
4103 pci_free_consistent(h->pdev,
4104 h->nr_cmds * sizeof(struct CommandList),
4105 h->cmd_pool, h->cmd_pool_dhandle);
4106 if (h->errinfo_pool)
4107 pci_free_consistent(h->pdev,
4108 h->nr_cmds * sizeof(struct ErrorInfo),
4109 h->errinfo_pool,
4110 h->errinfo_pool_dhandle);
4111 }
4113 static int hpsa_request_irq(struct ctlr_info *h,
4114 irqreturn_t (*msixhandler)(int, void *),
4115 irqreturn_t (*intxhandler)(int, void *))
4119 if (h->msix_vector || h->msi_vector)
4120 rc = request_irq(h->intr[h->intr_mode], msixhandler,
4121 0, h->devname, h);
4122 else
4123 rc = request_irq(h->intr[h->intr_mode], intxhandler,
4124 IRQF_SHARED, h->devname, h);
4125 if (rc) {
4126 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4127 h->intr[h->intr_mode], h->devname);
4128 return -ENODEV;
4129 }
4130 return 0;
4131 }
4133 static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4135 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4136 HPSA_RESET_TYPE_CONTROLLER)) {
4137 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4138 return -EIO;
4139 }
4141 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4142 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4143 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4144 return -ENODEV;
4145 }
4147 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4148 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4149 dev_warn(&h->pdev->dev, "Board failed to become ready "
4150 "after soft reset.\n");
4151 return -ENODEV;
4152 }
4154 return 0;
4155 }
4157 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4159 free_irq(h->intr[h->intr_mode], h);
4160 #ifdef CONFIG_PCI_MSI
4161 if (h->msix_vector)
4162 pci_disable_msix(h->pdev);
4163 else if (h->msi_vector)
4164 pci_disable_msi(h->pdev);
4165 #endif /* CONFIG_PCI_MSI */
4166 hpsa_free_sg_chain_blocks(h);
4167 hpsa_free_cmd_pool(h);
4168 kfree(h->blockFetchTable);
4169 pci_free_consistent(h->pdev, h->reply_pool_size,
4170 h->reply_pool, h->reply_pool_dhandle);
4171 if (h->vaddr)
4172 iounmap(h->vaddr);
4173 if (h->transtable)
4174 iounmap(h->transtable);
4175 if (h->cfgtable)
4176 iounmap(h->cfgtable);
4177 pci_release_regions(h->pdev);
4178 kfree(h);
4179 }
4181 static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
4183 assert_spin_locked(&lockup_detector_lock);
4184 if (!hpsa_lockup_detector)
4185 return;
4186 if (h->lockup_detected)
4187 return; /* already stopped the lockup detector */
4188 list_del(&h->lockup_list);
4191 /* Called when controller lockup detected. */
4192 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
4194 struct CommandList *c = NULL;
4196 assert_spin_locked(&h->lock);
4197 /* Mark all outstanding commands as failed and complete them. */
4198 while (!list_empty(list)) {
4199 c = list_entry(list->next, struct CommandList, list);
4200 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
4201 finish_cmd(c, c->Header.Tag.lower);
4202 }
4203 }
4205 static void controller_lockup_detected(struct ctlr_info *h)
4207 unsigned long flags;
4209 assert_spin_locked(&lockup_detector_lock);
4210 remove_ctlr_from_lockup_detector_list(h);
4211 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4212 spin_lock_irqsave(&h->lock, flags);
4213 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
4214 spin_unlock_irqrestore(&h->lock, flags);
4215 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
4216 h->lockup_detected);
4217 pci_disable_device(h->pdev);
4218 spin_lock_irqsave(&h->lock, flags);
4219 fail_all_cmds_on_list(h, &h->cmpQ);
4220 fail_all_cmds_on_list(h, &h->reqQ);
4221 spin_unlock_irqrestore(&h->lock, flags);
4222 }
4224 static void detect_controller_lockup(struct ctlr_info *h)
4228 unsigned long flags;
4230 assert_spin_locked(&lockup_detector_lock);
4231 now = get_jiffies_64();
4232 /* If we've received an interrupt recently, we're ok. */
4233 if (time_after64(h->last_intr_timestamp +
4234 (h->heartbeat_sample_interval), now))
4235 return;
4237 /*
4238 * If we've already checked the heartbeat recently, we're ok.
4239 * This could happen if someone sends us a signal. We
4240 * otherwise don't care about signals in this thread.
4241 */
4242 if (time_after64(h->last_heartbeat_timestamp +
4243 (h->heartbeat_sample_interval), now))
4244 return;
4246 /* If heartbeat has not changed since we last looked, we're not ok. */
4247 spin_lock_irqsave(&h->lock, flags);
4248 heartbeat = readl(&h->cfgtable->HeartBeat);
4249 spin_unlock_irqrestore(&h->lock, flags);
4250 if (h->last_heartbeat == heartbeat) {
4251 controller_lockup_detected(h);
4252 return;
4253 }
4256 h->last_heartbeat = heartbeat;
4257 h->last_heartbeat_timestamp = now;
4258 }
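/* (Editor's note: the detection rule above -- if neither an interrupt
 * nor a change in the firmware's HeartBeat counter has been observed
 * within one heartbeat_sample_interval, the controller is declared
 * locked up and all queued commands are failed with
 * CMD_HARDWARE_ERR.) */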
4260 static int detect_controller_lockup_thread(void *notused)
4262 struct ctlr_info *h;
4263 unsigned long flags;
4266 struct list_head *this, *tmp;
4267 while (1) {
4268 schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
4269 if (kthread_should_stop())
4270 break;
4271 spin_lock_irqsave(&lockup_detector_lock, flags);
4272 list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
4273 h = list_entry(this, struct ctlr_info, lockup_list);
4274 detect_controller_lockup(h);
4276 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4277 }
4278 return 0;
4279 }
4281 static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
4283 unsigned long flags;
4285 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
4286 spin_lock_irqsave(&lockup_detector_lock, flags);
4287 list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
4288 spin_unlock_irqrestore(&lockup_detector_lock, flags);
static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, HPSA);
		/* kthread_run() returns an ERR_PTR, not NULL, on failure */
		if (IS_ERR(hpsa_lockup_detector))
			hpsa_lockup_detector = NULL;
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}
static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlr's to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		struct task_struct *t = hpsa_lockup_detector;

		hpsa_lockup_detector = NULL;
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
		/* kthread_stop() sleeps; don't call it under the spinlock */
		if (t)
			kthread_stop(t);
		return;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
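/*
 * Probe-time reset strategy (summary of the flow in hpsa_init_one below):
 *
 *   1. hpsa_init_reset_devices() attempts a hard reset.  If the board has
 *      no way to do one it returns -ENOTSUPP and we set try_soft_reset.
 *   2. We bring the controller up just far enough to accept commands.
 *   3. If try_soft_reset is set, we swap in "discard" interrupt handlers,
 *      issue the soft reset, wait out any stale completions, tear all the
 *      allocations down with hpsa_undo_allocations_after_kdump_soft_reset(),
 *      and jump back to reinit_after_soft_reset to start over from scratch.
 */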
static int __devinit hpsa_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, "hpsa%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) {
		rc = -ENODEV;
		goto clean2;
	}
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h)) {
		rc = -ENOMEM;
		goto clean4;
	}
	if (hpsa_allocate_sg_chain_blocks(h)) {
		rc = -ENOMEM;
		goto clean4;
	}
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irq(h->intr[h->intr_mode], h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do
		 * it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * The flush writes all data in the battery-backed cache out to
	 * the disks before we power down.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
}
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	stop_controller_lockup_detector(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
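/*
 * Worked example (illustrative only): with the bft[] table used below in
 * hpsa_enter_performant_mode(), the bucket sizes in 16-byte blocks are
 * {5, 6, 8, 10, 12, 20, 28, max_sg_entries + 4}.  A command with 7 SG
 * entries needs 7 + MINIMUM_TRANSFER_BLOCKS = 11 blocks; the first bucket
 * >= 11 is bucket[4] (12 blocks), so bucket_map[7] = 4 and the controller
 * fetches 12 * 16 = 192 bytes for that command instead of the maximum.
 */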
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to in order to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = h->max_sg_entries + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}
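/*
 * Sketch of how the block fetch table is consumed at submit time
 * (illustrative, based on the tag-encoding scheme described above; the
 * submission path in this driver does the equivalent of):
 *
 *	if (likely(h->transMethod == CFGTBL_Trans_Performant))
 *		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 *
 * i.e. the bucket index for the command's SG count is folded into the low
 * bits of the command address, so the controller knows how many 16-byte
 * blocks to DMA when it fetches the command.
 */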
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	h->max_sg_entries = 32;
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}
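/*
 * Reply queue sizing sketch (illustrative; assumes the reply-ring scheme
 * used by this transport): each completed command posts a single u64 tag
 * into the reply ring, so the ring needs exactly h->max_commands entries,
 * hence reply_pool_size = max_commands * sizeof(u64) above.  The consumer
 * distinguishes new entries from stale ones by the low "cycle" bit of
 * each tag, comparing it against h->reply_pool_wraparound and toggling
 * that flag whenever the head pointer wraps back to h->reply_pool.
 */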
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);