/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);
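
/* ipr_driver_lock serializes updates to the global adapter list
 * (ipr_ioa_head) above. */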
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
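
/* The max_speed module parameter (0-2) is used as an index into this
 * table to select the adapter's maximum bus speed limit. */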
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	"Synchronization required"},
	"Not ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
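
	/* The IOADL is embedded in the ipr_cmnd block itself, so its bus
	 * address is the command block's PCI address plus the field offset. */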
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;

	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
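	/* Reading the sense register back flushes the posted MMIO writes,
	 * so the mask is in effect before we return (standard PCI
	 * write-posting idiom). */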
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}
}
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Ensure the IOARCB is visible to the adapter before ringing the
	 * doorbell (IOARRIN) */
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
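
	/* Drop the host lock while sleeping; the done function
	 * (ipr_internal_cmd_done) runs from interrupt context and signals
	 * the completion. */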
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
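
		/* The adapter returns the HCAM response by DMA into the
		 * hostrcb buffer, described by a single-element read IOADL. */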
		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
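
	/* Dump four 32-bit words per line, prefixed with the byte offset */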
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};
/* Indexed by the SAS negotiated link rate field (link_rate masked with
 * IPR_PHY_LINK_RATE_MASK), so 16 entries are required. */
static const char *link_rate[] = {
	"--",
	"Reserved",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"Reserved",
	"Reserved",
	"Reserved",
	"1.5Gbps",
	"3.0Gbps",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
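
	/* The overlay ID identifies the error record format; each gets its
	 * own decoder below, and anything unrecognized is dumped raw. */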
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If a host reset hit us while we were already resetting the
	   adapter for some reason, and that reset failed, report failure. */
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	return SUCCESS;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
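 *
 *	Worked example: a SES entry limited to 160 (MB/s) on a 16-bit wide
 *	bus gives (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in units of
 *	100KHz.
 **/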
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
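/*
 * The mailbox dump above is a word-at-a-time handshake with the IOA:
 *   1. Raise RESET_ALERT + IO_DEBUG_ALERT to enter the LDUMP state.
 *   2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it to interlock.
 *   3. Write the dump address to the mailbox and drop RESET_ALERT.
 *   4. For each word: wait for the ack, read the mailbox, and clear
 *      the ack to request the next word (except for the final word).
 *   5. Re-raise RESET_ALERT and clear the debug alert/ack to exit,
 *      then poll until the IOA drops RESET_ALERT.
 */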
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
	}

	return bytes_copied;
}
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data.  sdt represents the pointer
	 * to the ioa generated dump table.  Dump data will be extracted based
	 * on entries in this table.
	 */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}
#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
	LEAVE;
}
/**
 * ipr_worker_thread - Worker thread
 * @work:	work struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->cfgte.res_addr.bus;
			target = res->cfgte.res_addr.target;
			lun = res->cfgte.res_addr.lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
	LEAVE;
}
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
			      loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int size = IPR_TRACE_SIZE;
	char *src = (char *)ioa_cfg->trace;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
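/*
 * Example (illustrative; the exact sysfs path depends on the kernel
 * version and host number):
 *
 *   dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4k
 *
 * reads the IPR_TRACE_SIZE byte command trace buffer exposed above.
 */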
static const struct {
	enum ipr_cache_state state;
	char *name;
} cache_state [] = {
	{ CACHE_NONE, "none" },
	{ CACHE_DISABLED, "disabled" },
	{ CACHE_ENABLED, "enabled" }
};

/**
 * ipr_show_write_caching - Show the write caching attribute
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int i, len = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (cache_state[i].state == ioa_cfg->cache_state) {
			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_write_caching - Enable/disable adapter write cache
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will enable/disable adapter write cache.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_write_caching(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	enum ipr_cache_state new_state = CACHE_INVALID;
	int i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (ioa_cfg->cache_state == CACHE_NONE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
			new_state = cache_state[i].state;
			break;
		}
	}

	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
		return -EINVAL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->cache_state == new_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	ioa_cfg->cache_state = new_state;
	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return count;
}

static struct class_device_attribute ipr_ioa_cache_attr = {
	.attr = {
		.name =		"write_cache",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_write_caching,
	.store = ipr_store_write_caching
};
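/*
 * Example usage of the attribute pair above (illustrative path):
 *
 *   cat /sys/class/scsi_host/host0/write_cache        -> "enabled"
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * Writes other than "enabled"/"disabled" are rejected with -EINVAL,
 * and a state change triggers a normal-shutdown adapter reset.
 */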
/**
 * ipr_show_fw_version - Show the firmware version
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct class_device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 * 	number of bytes consumed from the buffer
 **/
static ssize_t ipr_store_log_level(struct class_device *class_dev,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct class_device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct class_device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
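/*
 * Example (illustrative path): "echo 1 > .../run_diagnostics" resets
 * the adapter and fails with -EIO if any errors were logged during the
 * reset; the written value itself is ignored by the handler above.
 */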
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct class_device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct class_device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
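/*
 * Example (illustrative path): "echo 1 > .../reset_host" initiates a
 * normal-shutdown adapter reset and blocks the writer until the
 * reset/reload sequence completes.
 */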
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(scatterlist[j].page, order);
			kfree(sglist);
			return NULL;
		}

		scatterlist[i].page = page;
	}

	return sglist;
}
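/*
 * Worked example of the sizing math above, assuming PAGE_SIZE = 4096
 * and IPR_MAX_SGLIST = 64: a 1 MB image gives sg_size = 1048576 / 63 =
 * 16644, get_order(16644) = 3 (eight pages), bsize_elem = 32768, and
 * num_elem = 32, so the download fits in 32 order-3 page allocations.
 */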
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sglist->scatterlist[i].page, sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		kaddr = kmap(scatterlist[i].page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(scatterlist[i].page);

		scatterlist[i].length = bsize_elem;
	}

	if (len % bsize_elem) {
		kaddr = kmap(scatterlist[i].page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(scatterlist[i].page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
	ioarcb->write_ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct class_device *class_dev,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
		release_firmware(fw_entry);
		return -EINVAL;
	}

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct class_device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
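/*
 * Example (illustrative; both the sysfs path and the image name are
 * hypothetical, and the image must live under the firmware loader's
 * search path, e.g. /lib/firmware):
 *
 *   echo my_ioa_ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The name written here is passed to request_firmware() verbatim,
 * minus the trailing newline stripped above.
 */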
static struct class_device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_cache_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
			     loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (char *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (char *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (char *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
			      loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif
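/*
 * Example (illustrative path): "echo 1 > .../dump" allocates dump
 * memory and arms WAIT_FOR_DUMP, reading .../dump then retrieves the
 * driver and IOA dump data, and "echo 0 > .../dump" releases the
 * memory once the dump has been saved off.
 */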
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
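/*
 * Worked example: with 128 heads and 32 sectors/track, one cylinder is
 * 128 * 32 = 4096 sectors = 2 MB at 512 bytes/sector, so partitions
 * that start on a cylinder boundary are always 4k aligned; an
 * 8388608-sector (4 GB) disk reports 2048 cylinders.
 */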
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->cfgte.res_addr.bus == starget->channel) &&
		    (res->cfgte.res_addr.target == starget->id) &&
		    (res->cfgte.res_addr.lun == 0)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 * 	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->cfgte.res_addr.bus == sdev->channel) &&
		    (res->cfgte.res_addr.target == sdev->id) &&
		    (res->cfgte.res_addr.lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			ata_port_disable(res->sata_port->ap);
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->timeout = IPR_VSET_RW_TIMEOUT;
			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, res->sata_port->ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port)
		rc = ata_sas_port_init(sata_port->ap);
	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;
	regs = &ioarcb->add_data.u.regs;

	ioarcb->res_handle = res->cfgte.res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/**
 * ipr_sata_reset - Reset the SATA port
 * @ap:		SATA port to reset
 * @classes:	class of the attached device
 * @deadline:	deadline jiffies for the operation
 *
 * This function issues a SATA phy reset to the affected ATA port.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		switch (res->cfgte.proto) {
		case IPR_PROTO_SATA:
		case IPR_PROTO_SAS_STP:
			*classes = ATA_DEV_ATA;
			break;
		case IPR_PROTO_SATA_ATAPI:
		case IPR_PROTO_SAS_STP_ATAPI:
			*classes = ATA_DEV_ATAPI;
			break;
		default:
			*classes = ATA_DEV_UNKNOWN;
			break;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
			    sizeof(res->cfgte.res_handle))) {
			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
			break;
		}
	}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ioa_cfg->errors_logged++;
				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");

				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
					ioa_cfg->sdt_state = GET_DUMP;

				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
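/*
 * The host RRQ scan in ipr_isr() above relies on a toggle-bit protocol:
 * the IOA flips IPR_HRRQ_TOGGLE_BIT in each entry every time it wraps
 * the queue, so an entry belongs to the current pass only while its
 * toggle bit matches ioa_cfg->toggle_bit; the driver flips its own copy
 * whenever hrrq_curr wraps from hrrq_end back to hrrq_start.
 */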
4194 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4195 * @ioa_cfg: ioa config struct
4196 * @ipr_cmd: ipr command struct
4199 * 0 on success / -1 on failure
4201 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4202 struct ipr_cmnd *ipr_cmd)
4205 struct scatterlist *sglist;
4207 u32 ioadl_flags = 0;
4208 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4209 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4210 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4212 length = scsi_cmd->request_bufflen;
4217 if (scsi_cmd->use_sg) {
4218 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4219 scsi_cmd->request_buffer,
4221 scsi_cmd->sc_data_direction);
4223 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4224 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4225 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4226 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4227 ioarcb->write_ioadl_len =
4228 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4229 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4230 ioadl_flags = IPR_IOADL_FLAGS_READ;
4231 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4232 ioarcb->read_ioadl_len =
4233 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4236 sglist = scsi_cmd->request_buffer;
4238 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4239 ioadl = ioarcb->add_data.u.ioadl;
4240 ioarcb->write_ioadl_addr =
4241 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4242 offsetof(struct ipr_ioarcb, add_data));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
}
4246 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4247 ioadl[i].flags_and_data_len =
4248 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4250 cpu_to_be32(sg_dma_address(&sglist[i]));
4253 if (likely(ipr_cmd->dma_use_sg)) {
4254 ioadl[i-1].flags_and_data_len |=
cpu_to_be32(IPR_IOADL_FLAGS_LAST);
return 0;
} else
dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
} else {
4260 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4261 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4262 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4263 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4264 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4265 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4266 ioadl_flags = IPR_IOADL_FLAGS_READ;
4267 ioarcb->read_data_transfer_length = cpu_to_be32(length);
ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
}
4271 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4272 scsi_cmd->request_buffer, length,
4273 scsi_cmd->sc_data_direction);
4275 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4276 ioadl = ioarcb->add_data.u.ioadl;
4277 ioarcb->write_ioadl_addr =
4278 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4279 offsetof(struct ipr_ioarcb, add_data));
4280 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4281 ipr_cmd->dma_use_sg = 1;
4282 ioadl[0].flags_and_data_len =
4283 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
return 0;
} else
dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
}

return -1;
4294 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4295 * @scsi_cmd: scsi command struct
4300 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
u8 tag[2];
u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
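/*
 * scsi_populate_tag_msg() yields a two-byte SCSI tag message;
 * byte 0 carries the queue tag type, which maps directly onto
 * the IOA task attribute flags.
 */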
if (scsi_populate_tag_msg(scsi_cmd, tag)) {
switch (tag[0]) {
case MSG_SIMPLE_TAG:
rc = IPR_FLAGS_LO_SIMPLE_TASK;
break;
case MSG_HEAD_TAG:
rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
break;
case MSG_ORDERED_TAG:
rc = IPR_FLAGS_LO_ORDERED_TASK;
break;
}
}

return rc;
4323 * ipr_erp_done - Process completion of ERP for a device
4324 * @ipr_cmd: ipr command struct
* This function copies the sense buffer into the scsi_cmd
* struct and invokes the scsi_done callback.
4332 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4334 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4335 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4336 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4337 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4339 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4340 scsi_cmd->result |= (DID_ERROR << 16);
4341 scmd_printk(KERN_ERR, scsi_cmd,
4342 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
} else {
memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
}

if (res) {
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
res->in_erp = 0;
}
4353 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4354 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4355 scsi_cmd->scsi_done(scsi_cmd);
4359 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4360 * @ipr_cmd: ipr command struct
4365 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4367 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4368 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4369 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4371 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4372 ioarcb->write_data_transfer_length = 0;
4373 ioarcb->read_data_transfer_length = 0;
4374 ioarcb->write_ioadl_len = 0;
4375 ioarcb->read_ioadl_len = 0;
4377 ioasa->residual_data_len = 0;
4378 ioarcb->write_ioadl_addr =
4379 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4380 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4384 * ipr_erp_request_sense - Send request sense to a device
4385 * @ipr_cmd: ipr command struct
4387 * This function sends a request sense to a device as a result
4388 * of a check condition.
4393 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4395 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4396 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4398 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
ipr_erp_done(ipr_cmd);
return;
}
4403 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4405 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4406 cmd_pkt->cdb[0] = REQUEST_SENSE;
4407 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4408 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4409 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4410 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
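/*
 * A single IOADL entry points the IOA at the command block's
 * preallocated DMA'able sense buffer.
 */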
4412 ipr_cmd->ioadl[0].flags_and_data_len =
4413 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4414 ipr_cmd->ioadl[0].address =
4415 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4417 ipr_cmd->ioarcb.read_ioadl_len =
4418 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4419 ipr_cmd->ioarcb.read_data_transfer_length =
4420 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4422 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4423 IPR_REQUEST_SENSE_TIMEOUT * 2);
4427 * ipr_erp_cancel_all - Send cancel all to a device
4428 * @ipr_cmd: ipr command struct
4430 * This function sends a cancel all to a device to clear the
4431 * queue. If we are running TCQ on the device, QERR is set to 1,
4432 * which means all outstanding ops have been dropped on the floor.
4433 * Cancel all will return them to us.
4438 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4440 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4441 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4442 struct ipr_cmd_pkt *cmd_pkt;
4446 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4448 if (!scsi_get_tag_type(scsi_cmd->device)) {
ipr_erp_request_sense(ipr_cmd);
return;
}
4453 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4454 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4455 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4457 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4458 IPR_CANCEL_ALL_TIMEOUT);
4462 * ipr_dump_ioasa - Dump contents of IOASA
4463 * @ioa_cfg: ioa config struct
4464 * @ipr_cmd: ipr command struct
4465 * @res: resource entry struct
4467 * This function is invoked by the interrupt handler when ops
* fail. It will log the IOASA if appropriate. Only called
* for GPDD ops.
4474 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4475 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
int i;
u16 data_len;
u32 ioasc, fd_ioasc;
4480 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
__be32 *ioasa_data = (__be32 *)ioasa;
int error_index;
4484 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4485 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
return;
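/*
 * On a bus reset the failing device IOASC (fd_ioasc), when set,
 * identifies the underlying error, so prefer it for the error
 * table lookup.
 */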
4493 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4494 error_index = ipr_get_error(fd_ioasc);
4496 error_index = ipr_get_error(ioasc);
4498 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4499 /* Don't log an error if the IOA already logged one */
if (ioasa->ilid != 0)
return;

if (!ipr_is_gscsi(res))
return;

if (ipr_error_table[error_index].log_ioasa == 0)
return;
}
4510 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4512 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4513 data_len = sizeof(struct ipr_ioasa);
else
data_len = be16_to_cpu(ioasa->ret_stat_len);
4517 ipr_err("IOASA Dump:\n");
4519 for (i = 0; i < data_len / 4; i += 4) {
4520 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4521 be32_to_cpu(ioasa_data[i]),
4522 be32_to_cpu(ioasa_data[i+1]),
4523 be32_to_cpu(ioasa_data[i+2]),
4524 be32_to_cpu(ioasa_data[i+3]));
4529 * ipr_gen_sense - Generate SCSI sense data from an IOASA
* @ipr_cmd: ipr command struct
4536 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
u32 failing_lba;
u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4540 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4541 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4542 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4544 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
if (ioasc >= IPR_FIRST_DRIVER_IOASC)
return;
4549 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
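/*
 * Volume sets with a 64-bit failing LBA need descriptor-format
 * sense data (response code 0x72), since fixed-format sense can
 * only carry a 32-bit information field; everything else uses
 * fixed format (0x70).
 */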
4551 if (ipr_is_vset_device(res) &&
4552 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4553 ioasa->u.vset.failing_lba_hi != 0) {
4554 sense_buf[0] = 0x72;
4555 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4556 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4557 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
sense_buf[7] = 12;
sense_buf[8] = 0;
sense_buf[9] = 0x0A;
sense_buf[10] = 0x80;
4564 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4566 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4567 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4568 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4569 sense_buf[15] = failing_lba & 0x000000ff;
4571 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4573 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4574 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4575 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4576 sense_buf[19] = failing_lba & 0x000000ff;
} else {
sense_buf[0] = 0x70;
4579 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4580 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4581 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4583 /* Illegal request */
4584 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4585 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4586 sense_buf[7] = 10; /* additional length */
4588 /* IOARCB was in error */
4589 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4590 sense_buf[15] = 0xC0;
4591 else /* Parameter data was invalid */
4592 sense_buf[15] = 0x80;
sense_buf[16] =
((IPR_FIELD_POINTER_MASK &
be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
sense_buf[17] =
(IPR_FIELD_POINTER_MASK &
be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
} else {
4601 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4602 if (ipr_is_vset_device(res))
4603 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
else
failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4607 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4608 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4609 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4610 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4611 sense_buf[6] = failing_lba & 0x000000ff;
}

sense_buf[7] = 6; /* additional length */
}
}
4620 * ipr_get_autosense - Copy autosense data to sense buffer
4621 * @ipr_cmd: ipr command struct
4623 * This function copies the autosense buffer to the buffer
4624 * in the scsi_cmd, if there is autosense available.
4627 * 1 if autosense was available / 0 if not
4629 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4631 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
return 0;
4636 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4637 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
SCSI_SENSE_BUFFERSIZE));

return 1;
4643 * ipr_erp_start - Process an error response for a SCSI op
4644 * @ioa_cfg: ioa config struct
4645 * @ipr_cmd: ipr command struct
4647 * This function determines whether or not to initiate ERP
4648 * on the affected device.
4653 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4654 struct ipr_cmnd *ipr_cmd)
4656 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4657 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4658 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
if (!res) {
ipr_scsi_eh_done(ipr_cmd);
return;
}
4665 if (!ipr_is_gscsi(res))
4666 ipr_gen_sense(ipr_cmd);
4668 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
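/*
 * Translate the masked IOASC into a mid-layer disposition:
 * retry, abort, report the device gone, or kick off further ERP.
 */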
4670 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4671 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4672 if (ipr_is_naca_model(res))
scsi_cmd->result |= (DID_ABORT << 16);
else
scsi_cmd->result |= (DID_IMM_RETRY << 16);
break;
4677 case IPR_IOASC_IR_RESOURCE_HANDLE:
4678 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
scsi_cmd->result |= (DID_NO_CONNECT << 16);
break;
4681 case IPR_IOASC_HW_SEL_TIMEOUT:
4682 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4683 if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
break;
4686 case IPR_IOASC_SYNC_REQUIRED:
if (!res->in_erp)
res->needs_sync_complete = 1;
scsi_cmd->result |= (DID_IMM_RETRY << 16);
break;
4691 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4692 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
4695 case IPR_IOASC_BUS_WAS_RESET:
4696 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4698 * Report the bus reset and ask for a retry. The device
* will return a Check Condition/Unit Attention on the next command.
4701 if (!res->resetting_device)
4702 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4703 scsi_cmd->result |= (DID_ERROR << 16);
4704 if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
break;
4707 case IPR_IOASC_HW_DEV_BUS_STATUS:
4708 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4709 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4710 if (!ipr_get_autosense(ipr_cmd)) {
4711 if (!ipr_is_naca_model(res)) {
ipr_erp_cancel_all(ipr_cmd);
return;
}
}
}
4717 if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
break;
case IPR_IOASC_NR_INIT_CMD_REQUIRED:
break;
default:
4723 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4724 scsi_cmd->result |= (DID_ERROR << 16);
4725 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
res->needs_sync_complete = 1;
break;
}
4730 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4731 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4732 scsi_cmd->scsi_done(scsi_cmd);
4736 * ipr_scsi_done - mid-layer done function
4737 * @ipr_cmd: ipr command struct
4739 * This function is invoked by the interrupt handler for
4740 * ops generated by the SCSI mid-layer
4745 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4747 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4748 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4749 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4751 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4753 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4754 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4755 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4756 scsi_cmd->scsi_done(scsi_cmd);
} else
ipr_erp_start(ioa_cfg, ipr_cmd);
4762 * ipr_queuecommand - Queue a mid-layer request
4763 * @scsi_cmd: scsi command struct
4764 * @done: done function
4766 * This function queues a request generated by the mid-layer.
* Return value:
* 0 on success
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4771 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4773 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4774 void (*done) (struct scsi_cmnd *))
4776 struct ipr_ioa_cfg *ioa_cfg;
4777 struct ipr_resource_entry *res;
4778 struct ipr_ioarcb *ioarcb;
4779 struct ipr_cmnd *ipr_cmd;
4782 scsi_cmd->scsi_done = done;
4783 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4784 res = scsi_cmd->device->hostdata;
4785 scsi_cmd->result = (DID_OK << 16);
4788 * We are currently blocking all devices due to a host reset
4789 * We have told the host to stop giving us new requests, but
4790 * ERP ops don't count. FIXME
4792 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4793 return SCSI_MLQUEUE_HOST_BUSY;
4796 * FIXME - Create scsi_set_host_offline interface
4797 * and the ioa_is_dead check can be removed
4799 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4800 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4801 scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
return 0;
}
4806 if (ipr_is_gata(res) && res->sata_port)
4807 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4809 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4810 ioarcb = &ipr_cmd->ioarcb;
4811 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4813 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4814 ipr_cmd->scsi_cmd = scsi_cmd;
4815 ioarcb->res_handle = res->cfgte.res_handle;
4816 ipr_cmd->done = ipr_scsi_done;
4817 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4819 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4820 if (scsi_cmd->underflow == 0)
4821 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4823 if (res->needs_sync_complete) {
4824 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
4828 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4829 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4830 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
}
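/*
 * Vendor-specific opcodes (0xC0 and above) are sent to the IOA as
 * adapter commands rather than pass-through CDBs, except on generic
 * SCSI devices where only IPR_QUERY_RSRC_STATE gets this treatment.
 */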
4834 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4835 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4836 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4838 if (likely(rc == 0))
4839 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4841 if (likely(rc == 0)) {
mb();
writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
ioa_cfg->regs.ioarrin_reg);
} else {
list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
return SCSI_MLQUEUE_HOST_BUSY;
}

return 0;
4854 * ipr_ioctl - IOCTL handler
* @sdev: scsi device struct
* @cmd: IOCTL cmd
* @arg: IOCTL arg
*
* Return value:
4860 * 0 on success / other on failure
4862 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4864 struct ipr_resource_entry *res;
4866 res = (struct ipr_resource_entry *)sdev->hostdata;
4867 if (res && ipr_is_gata(res))
return ata_scsi_ioctl(sdev, cmd, arg);

return -EINVAL;
* ipr_ioa_info - Get information about the card/driver
* @host: scsi host struct
4878 * pointer to buffer with description string
4880 static const char * ipr_ioa_info(struct Scsi_Host *host)
4882 static char buffer[512];
4883 struct ipr_ioa_cfg *ioa_cfg;
4884 unsigned long lock_flags = 0;
4886 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4888 spin_lock_irqsave(host->host_lock, lock_flags);
4889 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
spin_unlock_irqrestore(host->host_lock, lock_flags);

return buffer;
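/* SCSI mid-layer entry points and queue limits for this driver */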
4895 static struct scsi_host_template driver_template = {
4896 .module = THIS_MODULE,
4898 .info = ipr_ioa_info,
4900 .queuecommand = ipr_queuecommand,
4901 .eh_abort_handler = ipr_eh_abort,
4902 .eh_device_reset_handler = ipr_eh_dev_reset,
4903 .eh_host_reset_handler = ipr_eh_host_reset,
4904 .slave_alloc = ipr_slave_alloc,
4905 .slave_configure = ipr_slave_configure,
4906 .slave_destroy = ipr_slave_destroy,
4907 .target_alloc = ipr_target_alloc,
4908 .target_destroy = ipr_target_destroy,
4909 .change_queue_depth = ipr_change_queue_depth,
4910 .change_queue_type = ipr_change_queue_type,
4911 .bios_param = ipr_biosparam,
4912 .can_queue = IPR_MAX_COMMANDS,
4914 .sg_tablesize = IPR_MAX_SGLIST,
4915 .max_sectors = IPR_IOA_MAX_SECTORS,
4916 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4917 .use_clustering = ENABLE_CLUSTERING,
4918 .shost_attrs = ipr_ioa_attrs,
4919 .sdev_attrs = ipr_dev_attrs,
4920 .proc_name = IPR_NAME
4924 * ipr_ata_phy_reset - libata phy_reset handler
4925 * @ap: ata port to reset
4928 static void ipr_ata_phy_reset(struct ata_port *ap)
4930 unsigned long flags;
4931 struct ipr_sata_port *sata_port = ap->private_data;
4932 struct ipr_resource_entry *res = sata_port->res;
4933 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
int rc;

spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4938 while(ioa_cfg->in_reset_reload) {
4939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4940 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4941 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}

if (!ioa_cfg->allow_cmds)
goto out_unlock;

rc = ipr_device_reset(ioa_cfg, res);

if (rc) {
ap->ops->port_disable(ap);
goto out_unlock;
}
4954 switch(res->cfgte.proto) {
4955 case IPR_PROTO_SATA:
4956 case IPR_PROTO_SAS_STP:
ap->device[0].class = ATA_DEV_ATA;
break;
4959 case IPR_PROTO_SATA_ATAPI:
4960 case IPR_PROTO_SAS_STP_ATAPI:
ap->device[0].class = ATA_DEV_ATAPI;
break;
default:
ap->device[0].class = ATA_DEV_UNKNOWN;
4965 ap->ops->port_disable(ap);
break;
}

out_unlock:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4975 * ipr_ata_post_internal - Cleanup after an internal command
4976 * @qc: ATA queued command
4981 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4983 struct ipr_sata_port *sata_port = qc->ap->private_data;
4984 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4985 struct ipr_cmnd *ipr_cmd;
4986 unsigned long flags;
4988 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4989 while(ioa_cfg->in_reset_reload) {
4990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4991 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
4995 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4996 if (ipr_cmd->qc == qc) {
ipr_device_reset(ioa_cfg, sata_port->res);
break;
}
}
5001 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5005 * ipr_tf_read - Read the current ATA taskfile for the ATA port
* @ap: ATA port
* @tf: destination ATA taskfile
5012 static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5014 struct ipr_sata_port *sata_port = ap->private_data;
5015 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5017 tf->feature = g->error;
tf->nsect = g->nsect;
tf->lbal = g->lbal;
tf->lbam = g->lbam;
tf->lbah = g->lbah;
5022 tf->device = g->device;
5023 tf->command = g->status;
5024 tf->hob_nsect = g->hob_nsect;
5025 tf->hob_lbal = g->hob_lbal;
5026 tf->hob_lbam = g->hob_lbam;
5027 tf->hob_lbah = g->hob_lbah;
5028 tf->ctl = g->alt_status;
5032 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5033 * @regs: destination
5034 * @tf: source ATA taskfile
5039 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5040 struct ata_taskfile *tf)
5042 regs->feature = tf->feature;
5043 regs->nsect = tf->nsect;
5044 regs->lbal = tf->lbal;
5045 regs->lbam = tf->lbam;
5046 regs->lbah = tf->lbah;
5047 regs->device = tf->device;
5048 regs->command = tf->command;
5049 regs->hob_feature = tf->hob_feature;
5050 regs->hob_nsect = tf->hob_nsect;
5051 regs->hob_lbal = tf->hob_lbal;
5052 regs->hob_lbam = tf->hob_lbam;
5053 regs->hob_lbah = tf->hob_lbah;
5054 regs->ctl = tf->ctl;
5058 * ipr_sata_done - done function for SATA commands
5059 * @ipr_cmd: ipr command struct
5061 * This function is invoked by the interrupt handler for
5062 * ops generated by the SCSI mid-layer to SATA devices
5067 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5070 struct ata_queued_cmd *qc = ipr_cmd->qc;
5071 struct ipr_sata_port *sata_port = qc->ap->private_data;
5072 struct ipr_resource_entry *res = sata_port->res;
5073 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5075 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5076 sizeof(struct ipr_ioasa_gata));
5077 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5079 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5080 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5081 res->cfgte.res_addr.target);
5083 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5084 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
else
qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5087 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5088 ata_qc_complete(qc);
5092 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5093 * @ipr_cmd: ipr command struct
5094 * @qc: ATA queued command
5097 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5098 struct ata_queued_cmd *qc)
5100 u32 ioadl_flags = 0;
5101 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5102 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5103 int len = qc->nbytes + qc->pad_len;
struct scatterlist *sg;

if (len == 0)
return;
5109 if (qc->dma_dir == DMA_TO_DEVICE) {
5110 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5111 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5112 ioarcb->write_data_transfer_length = cpu_to_be32(len);
5113 ioarcb->write_ioadl_len =
5114 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5115 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5116 ioadl_flags = IPR_IOADL_FLAGS_READ;
5117 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5118 ioarcb->read_ioadl_len =
5119 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5122 ata_for_each_sg(sg, qc) {
5123 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5124 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5125 if (ata_sg_is_last(sg, qc))
ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
else
ioadl++;
}
5133 * ipr_qc_issue - Issue a SATA qc to a device
5134 * @qc: queued command
5139 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5141 struct ata_port *ap = qc->ap;
5142 struct ipr_sata_port *sata_port = ap->private_data;
5143 struct ipr_resource_entry *res = sata_port->res;
5144 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5145 struct ipr_cmnd *ipr_cmd;
5146 struct ipr_ioarcb *ioarcb;
5147 struct ipr_ioarcb_ata_regs *regs;
5149 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5150 return AC_ERR_SYSTEM;
5152 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5153 ioarcb = &ipr_cmd->ioarcb;
5154 regs = &ioarcb->add_data.u.regs;
5156 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5157 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5159 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5161 ipr_cmd->done = ipr_sata_done;
5162 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5163 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5164 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5165 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5166 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5168 ipr_build_ata_ioadl(ipr_cmd, qc);
5169 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5170 ipr_copy_sata_tf(regs, &qc->tf);
5171 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5172 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
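/*
 * Map the libata taskfile protocol onto IOA ATA flags: DMA
 * protocols set the DMA transfer type and ATAPI protocols mark
 * the op as a packet command.
 */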
5174 switch (qc->tf.protocol) {
case ATA_PROT_NODATA:
case ATA_PROT_PIO:
break;

case ATA_PROT_DMA:
regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
break;

case ATA_PROT_ATAPI:
case ATA_PROT_ATAPI_NODATA:
regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
break;

case ATA_PROT_ATAPI_DMA:
regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
break;

default:
WARN_ON(1);
return AC_ERR_INVALID;
}

mb();
5199 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
ioa_cfg->regs.ioarrin_reg);

return 0;
5205 * ipr_ata_check_status - Return last ATA status
5211 static u8 ipr_ata_check_status(struct ata_port *ap)
5213 struct ipr_sata_port *sata_port = ap->private_data;
5214 return sata_port->ioasa.status;
5218 * ipr_ata_check_altstatus - Return last ATA altstatus
5224 static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5226 struct ipr_sata_port *sata_port = ap->private_data;
5227 return sata_port->ioasa.alt_status;
5230 static struct ata_port_operations ipr_sata_ops = {
5231 .port_disable = ata_port_disable,
5232 .check_status = ipr_ata_check_status,
5233 .check_altstatus = ipr_ata_check_altstatus,
5234 .dev_select = ata_noop_dev_select,
5235 .phy_reset = ipr_ata_phy_reset,
5236 .post_internal_cmd = ipr_ata_post_internal,
5237 .tf_read = ipr_tf_read,
5238 .qc_prep = ata_noop_qc_prep,
5239 .qc_issue = ipr_qc_issue,
5240 .port_start = ata_sas_port_start,
5241 .port_stop = ata_sas_port_stop
5244 static struct ata_port_info sata_port_info = {
5245 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5246 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5247 .pio_mask = 0x10, /* pio4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 */
5250 .port_ops = &ipr_sata_ops
5253 #ifdef CONFIG_PPC_PSERIES
5254 static const u16 ipr_blocked_processors[] = {
5266 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5267 * @ioa_cfg: ioa cfg struct
5269 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5270 * certain pSeries hardware. This function determines if the given
* adapter is in one of these configurations or not.
5274 * 1 if adapter is not supported / 0 if adapter is supported
5276 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
int i;
u8 rev_id;

if (ioa_cfg->type == 0x5702) {
5282 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5283 &rev_id) == PCIBIOS_SUCCESSFUL) {
if (rev_id < 4) {
for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
if (__is_processor(ipr_blocked_processors[i]))
return 1;
}
}
}
}

return 0;
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
5299 * ipr_ioa_bringdown_done - IOA bring down completion.
5300 * @ipr_cmd: ipr command struct
5302 * This function processes the completion of an adapter bring down.
5303 * It wakes any reset sleepers.
5308 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5313 ioa_cfg->in_reset_reload = 0;
5314 ioa_cfg->reset_retries = 0;
5315 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5316 wake_up_all(&ioa_cfg->reset_wait_q);
5318 spin_unlock_irq(ioa_cfg->host->host_lock);
5319 scsi_unblock_requests(ioa_cfg->host);
5320 spin_lock_irq(ioa_cfg->host->host_lock);
5323 return IPR_RC_JOB_RETURN;
5327 * ipr_ioa_reset_done - IOA reset completion.
5328 * @ipr_cmd: ipr command struct
5330 * This function processes the completion of an adapter reset.
5331 * It schedules any necessary mid-layer add/removes and
5332 * wakes any reset sleepers.
5337 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5339 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5340 struct ipr_resource_entry *res;
5341 struct ipr_hostrcb *hostrcb, *temp;
int i = 0;

ioa_cfg->in_reset_reload = 0;
5346 ioa_cfg->allow_cmds = 1;
5347 ioa_cfg->reset_cmd = NULL;
5348 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5350 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
ipr_trace;
break;
}
}
5356 schedule_work(&ioa_cfg->work_q);
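/*
 * Repost the freed host controlled async message (HCAM) buffers:
 * the first IPR_NUM_LOG_HCAMS go back as error log buffers, the
 * remainder as configuration change buffers.
 */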
5358 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5359 list_del(&hostrcb->queue);
5360 if (i++ < IPR_NUM_LOG_HCAMS)
5361 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
else
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
5366 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5368 ioa_cfg->reset_retries = 0;
5369 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5370 wake_up_all(&ioa_cfg->reset_wait_q);
5372 spin_unlock_irq(ioa_cfg->host->host_lock);
5373 scsi_unblock_requests(ioa_cfg->host);
5374 spin_lock_irq(ioa_cfg->host->host_lock);
5376 if (!ioa_cfg->allow_cmds)
5377 scsi_block_requests(ioa_cfg->host);
5380 return IPR_RC_JOB_RETURN;
5384 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5385 * @supported_dev: supported device struct
5386 * @vpids: vendor product id struct
5391 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5392 struct ipr_std_inq_vpids *vpids)
5394 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5395 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5396 supported_dev->num_records = 1;
5397 supported_dev->data_length =
5398 cpu_to_be16(sizeof(struct ipr_supported_device));
5399 supported_dev->reserved = 0;
5403 * ipr_set_supported_devs - Send Set Supported Devices for a device
5404 * @ipr_cmd: ipr command struct
* This function sends a Set Supported Devices command to the adapter
5409 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5411 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5413 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5414 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5415 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5416 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5417 struct ipr_resource_entry *res = ipr_cmd->u.res;
5419 ipr_cmd->job_step = ipr_ioa_reset_done;
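/*
 * Adapter bringup is a chain of job_step handlers driven by
 * ipr_reset_ioa_job: returning IPR_RC_JOB_RETURN means an async
 * command was issued and the chain resumes on its completion,
 * while IPR_RC_JOB_CONTINUE runs the next step immediately.
 */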
5421 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
if (!ipr_is_scsi_disk(res))
continue;
5425 ipr_cmd->u.res = res;
5426 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5428 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5429 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5430 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5432 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5433 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5434 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5436 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5437 sizeof(struct ipr_supported_device));
5438 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5439 offsetof(struct ipr_misc_cbs, supp_dev));
5440 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5441 ioarcb->write_data_transfer_length =
5442 cpu_to_be32(sizeof(struct ipr_supported_device));
5444 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5445 IPR_SET_SUP_DEVICE_TIMEOUT);
5447 ipr_cmd->job_step = ipr_set_supported_devs;
return IPR_RC_JOB_RETURN;
}
5451 return IPR_RC_JOB_CONTINUE;
5455 * ipr_setup_write_cache - Disable write cache if needed
5456 * @ipr_cmd: ipr command struct
* This function sets up the adapter's write cache to the desired setting
5461 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5463 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5465 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5467 ipr_cmd->job_step = ipr_set_supported_devs;
5468 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5469 struct ipr_resource_entry, queue);
5471 if (ioa_cfg->cache_state != CACHE_DISABLED)
5472 return IPR_RC_JOB_CONTINUE;
5474 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5475 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5476 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5477 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5479 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5481 return IPR_RC_JOB_RETURN;
5485 * ipr_get_mode_page - Locate specified mode page
5486 * @mode_pages: mode page buffer
5487 * @page_code: page code to find
5488 * @len: minimum required length for mode page
5491 * pointer to mode page / NULL on failure
5493 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5494 u32 page_code, u32 len)
struct ipr_mode_page_hdr *mode_hdr;
u32 page_length;
u32 length;

if (!mode_pages || (mode_pages->hdr.length == 0))
return NULL;
5503 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5504 mode_hdr = (struct ipr_mode_page_hdr *)
5505 (mode_pages->data + mode_pages->hdr.block_desc_len);
while (length) {
if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
return mode_hdr;
break;
} else {
5513 page_length = (sizeof(struct ipr_mode_page_hdr) +
5514 mode_hdr->page_length);
5515 length -= page_length;
5516 mode_hdr = (struct ipr_mode_page_hdr *)
((unsigned long)mode_hdr + page_length);
}
}

return NULL;
5524 * ipr_check_term_power - Check for term power errors
5525 * @ioa_cfg: ioa config struct
5526 * @mode_pages: IOAFP mode pages buffer
5528 * Check the IOAFP's mode page 28 for term power errors
5533 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5534 struct ipr_mode_pages *mode_pages)
int i;
int entry_length;
struct ipr_dev_bus_entry *bus;
5539 struct ipr_mode_page28 *mode_page;
5541 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5542 sizeof(struct ipr_mode_page28));
5544 entry_length = mode_page->entry_length;
5546 bus = mode_page->bus;
5548 for (i = 0; i < mode_page->num_entries; i++) {
5549 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5550 dev_err(&ioa_cfg->pdev->dev,
5551 "Term power is absent on scsi bus %d\n",
5555 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5560 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5561 * @ioa_cfg: ioa config struct
5563 * Looks through the config table checking for SES devices. If
* the SES device appears in the SES table with a maximum SCSI
* bus speed, the bus speed for that bus is limited accordingly.
5570 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
int i;
u32 max_xfer_rate;

for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5576 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5577 ioa_cfg->bus_attr[i].bus_width);
5579 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5580 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5585 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5586 * @ioa_cfg: ioa config struct
5587 * @mode_pages: mode page 28 buffer
5589 * Updates mode page 28 based on driver configuration
5594 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5595 struct ipr_mode_pages *mode_pages)
5597 int i, entry_length;
5598 struct ipr_dev_bus_entry *bus;
5599 struct ipr_bus_attributes *bus_attr;
5600 struct ipr_mode_page28 *mode_page;
5602 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5603 sizeof(struct ipr_mode_page28));
5605 entry_length = mode_page->entry_length;
5607 /* Loop for each device bus entry */
5608 for (i = 0, bus = mode_page->bus;
5609 i < mode_page->num_entries;
5610 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5611 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5612 dev_err(&ioa_cfg->pdev->dev,
5613 "Invalid resource address reported: 0x%08X\n",
IPR_GET_PHYS_LOC(bus->res_addr));
continue;
}
5618 bus_attr = &ioa_cfg->bus_attr[i];
5619 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5620 bus->bus_width = bus_attr->bus_width;
5621 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5622 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5623 if (bus_attr->qas_enabled)
5624 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
else
bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
}
5631 * ipr_build_mode_select - Build a mode select command
5632 * @ipr_cmd: ipr command struct
5633 * @res_handle: resource handle to send command to
5634 * @parm: Byte 2 of Mode Sense command
5635 * @dma_addr: DMA buffer address
5636 * @xfer_len: data transfer length
5641 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
__be32 res_handle, u8 parm, u32 dma_addr,
u8 xfer_len)
5645 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5646 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5648 ioarcb->res_handle = res_handle;
5649 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5650 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5651 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5652 ioarcb->cmd_pkt.cdb[1] = parm;
5653 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5655 ioadl->flags_and_data_len =
5656 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5657 ioadl->address = cpu_to_be32(dma_addr);
5658 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5659 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5663 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5664 * @ipr_cmd: ipr command struct
5666 * This function sets up the SCSI bus attributes and sends
5667 * a Mode Select for Page 28 to activate them.
5672 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
int length;
5679 ipr_scsi_bus_speed_limit(ioa_cfg);
5680 ipr_check_term_power(ioa_cfg, mode_pages);
5681 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
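/*
 * The transfer length is the mode data length plus the length
 * byte itself; the mode data length field is reserved during
 * MODE SELECT and must be zeroed.
 */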
5682 length = mode_pages->hdr.length + 1;
5683 mode_pages->hdr.length = 0;
5685 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
length);
5689 ipr_cmd->job_step = ipr_setup_write_cache;
5690 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5693 return IPR_RC_JOB_RETURN;
5697 * ipr_build_mode_sense - Builds a mode sense command
5698 * @ipr_cmd: ipr command struct
* @res_handle: resource handle to send command to
5700 * @parm: Byte 2 of mode sense command
5701 * @dma_addr: DMA address of mode sense buffer
5702 * @xfer_len: Size of DMA buffer
5707 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
__be32 res_handle,
u8 parm, u32 dma_addr, u8 xfer_len)
5711 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5712 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5714 ioarcb->res_handle = res_handle;
5715 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5716 ioarcb->cmd_pkt.cdb[2] = parm;
5717 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5718 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5720 ioadl->flags_and_data_len =
5721 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5722 ioadl->address = cpu_to_be32(dma_addr);
5723 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5724 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5728 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5729 * @ipr_cmd: ipr command struct
5731 * This function handles the failure of an IOA bringup command.
5736 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5739 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5741 dev_err(&ioa_cfg->pdev->dev,
5742 "0x%02X failed with IOASC: 0x%08X\n",
5743 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5745 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5746 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5747 return IPR_RC_JOB_RETURN;
5751 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5752 * @ipr_cmd: ipr command struct
5754 * This function handles the failure of a Mode Sense to the IOAFP.
5755 * Some adapters do not handle all mode pages.
5758 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5760 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5762 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5764 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5765 ipr_cmd->job_step = ipr_setup_write_cache;
5766 return IPR_RC_JOB_CONTINUE;
5769 return ipr_reset_cmd_failed(ipr_cmd);
5773 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5774 * @ipr_cmd: ipr command struct
* This function sends a Page 28 mode sense to the IOA to
* retrieve SCSI bus attributes.
5782 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5787 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5788 0x28, ioa_cfg->vpd_cbs_dma +
5789 offsetof(struct ipr_misc_cbs, mode_pages),
5790 sizeof(struct ipr_mode_pages));
5792 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5793 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5795 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5798 return IPR_RC_JOB_RETURN;
5802 * ipr_init_res_table - Initialize the resource table
5803 * @ipr_cmd: ipr command struct
5805 * This function looks through the existing resource table, comparing
5806 * it with the config table. This function will take care of old/new
5807 * devices and schedule adding/removing them from the mid-layer
5811 * IPR_RC_JOB_CONTINUE
5813 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5815 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5816 struct ipr_resource_entry *res, *temp;
struct ipr_config_table_entry *cfgte;
int found, i;
LIST_HEAD(old_res);
5822 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5823 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5825 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5826 list_move_tail(&res->queue, &old_res);
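/*
 * All known resources now sit on old_res. Entries that reappear
 * in the new config table are moved back to used_res_q; whatever
 * remains on old_res afterwards has gone away and is flagged for
 * removal from the mid-layer.
 */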
5828 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
cfgte = &ioa_cfg->cfg_table->dev[i];
found = 0;
5832 list_for_each_entry_safe(res, temp, &old_res, queue) {
5833 if (!memcmp(&res->cfgte.res_addr,
5834 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
list_move_tail(&res->queue, &ioa_cfg->used_res_q);
found = 1;
break;
}
}

if (!found) {
if (list_empty(&ioa_cfg->free_res_q)) {
dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
break;
}

found = 1;
5848 res = list_entry(ioa_cfg->free_res_q.next,
5849 struct ipr_resource_entry, queue);
5850 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
ipr_init_res_entry(res);
res->add_to_ml = 1;
}

if (found)
memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
}
5859 list_for_each_entry_safe(res, temp, &old_res, queue) {
if (res->sdev) {
res->del_from_ml = 1;
5862 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5863 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
} else {
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
}
5869 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5872 return IPR_RC_JOB_CONTINUE;
5876 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5877 * @ipr_cmd: ipr command struct
5879 * This function sends a Query IOA Configuration command
5880 * to the adapter to retrieve the IOA configuration table.
5885 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5887 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5888 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5889 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5890 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5893 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5894 ucode_vpd->major_release, ucode_vpd->card_type,
5895 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5896 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5897 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5899 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5900 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5901 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5903 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5904 ioarcb->read_data_transfer_length =
5905 cpu_to_be32(sizeof(struct ipr_config_table));
5907 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5908 ioadl->flags_and_data_len =
5909 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5911 ipr_cmd->job_step = ipr_init_res_table;
5913 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5916 return IPR_RC_JOB_RETURN;
5920 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5921 * @ipr_cmd: ipr command struct
5923 * This utility function sends an inquiry to the adapter.
5928 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5929 u32 dma_addr, u8 xfer_len)
5931 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5932 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5935 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5936 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5938 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5939 ioarcb->cmd_pkt.cdb[1] = flags;
5940 ioarcb->cmd_pkt.cdb[2] = page;
5941 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5943 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5944 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5946 ioadl->address = cpu_to_be32(dma_addr);
5947 ioadl->flags_and_data_len =
5948 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5950 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5955 * ipr_inquiry_page_supported - Is the given inquiry page supported
5956 * @page0: inquiry page 0 buffer
5959 * This function determines if the specified inquiry page is supported.
5962 * 1 if page is supported / 0 if not
5964 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
int i;

for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
if (page0->page[i] == page)
return 1;

return 0;
5976 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5977 * @ipr_cmd: ipr command struct
5979 * This function sends a Page 3 inquiry to the adapter
5980 * to retrieve software VPD information.
5983 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5985 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5988 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5992 if (!ipr_inquiry_page_supported(page0, 1))
5993 ioa_cfg->cache_state = CACHE_NONE;
5995 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5997 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5998 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5999 sizeof(struct ipr_inquiry_page3));
6002 return IPR_RC_JOB_RETURN;
6006 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6007 * @ipr_cmd: ipr command struct
6009 * This function sends a Page 0 inquiry to the adapter
6010 * to retrieve supported inquiry pages.
6013 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6015 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6017 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
char type[5];

/* Grab the type out of the VPD and store it away */
memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
type[4] = '\0';
ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6027 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6029 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6030 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6031 sizeof(struct ipr_inquiry_page0));
6034 return IPR_RC_JOB_RETURN;
6038 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6039 * @ipr_cmd: ipr command struct
6041 * This function sends a standard inquiry to the adapter.
6046 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6048 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6051 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6053 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6054 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6055 sizeof(struct ipr_ioa_vpd));
6058 return IPR_RC_JOB_RETURN;
6062 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6063 * @ipr_cmd: ipr command struct
* This function sends an Identify Host Request Response Queue
6066 * command to establish the HRRQ with the adapter.
6071 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6073 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6074 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6077 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6079 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6080 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6082 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
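/*
 * The Identify Host RRQ CDB carries the big-endian DMA address of
 * the queue in bytes 2-5 and its size in bytes in bytes 7-8.
 */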
6083 ioarcb->cmd_pkt.cdb[2] =
6084 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6085 ioarcb->cmd_pkt.cdb[3] =
6086 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6087 ioarcb->cmd_pkt.cdb[4] =
6088 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6089 ioarcb->cmd_pkt.cdb[5] =
6090 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
6091 ioarcb->cmd_pkt.cdb[7] =
6092 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6093 ioarcb->cmd_pkt.cdb[8] =
6094 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6096 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6098 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6101 return IPR_RC_JOB_RETURN;
6105 * ipr_reset_timer_done - Adapter reset timer function
6106 * @ipr_cmd: ipr command struct
6108 * Description: This function is used in adapter reset processing
6109 * for timing events. If the reset_cmd pointer in the IOA
* config struct is not this adapter's, we are doing nested
* resets and fail_all_ops will take care of freeing the
* command block.
6117 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6119 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6120 unsigned long lock_flags = 0;
6122 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6124 if (ioa_cfg->reset_cmd == ipr_cmd) {
6125 list_del(&ipr_cmd->queue);
ipr_cmd->done(ipr_cmd);
}
6129 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6133 * ipr_reset_start_timer - Start a timer for adapter reset job
6134 * @ipr_cmd: ipr command struct
6135 * @timeout: timeout value
6137 * Description: This function is used in adapter reset processing
6138 * for timing events. If the reset_cmd pointer in the IOA
* config struct is not this adapter's, we are doing nested
* resets and fail_all_ops will take care of freeing the
* command block.
6146 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6147 unsigned long timeout)
6149 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6150 ipr_cmd->done = ipr_reset_ioa_job;
6152 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6153 ipr_cmd->timer.expires = jiffies + timeout;
6154 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6155 add_timer(&ipr_cmd->timer);
6159 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6160 * @ioa_cfg: ioa cfg struct
6165 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6167 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6169 /* Initialize Host RRQ pointers */
6170 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6171 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6172 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6173 ioa_cfg->toggle_bit = 1;
6175 /* Zero out config table */
6176 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6180 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6181 * @ipr_cmd: ipr command struct
6183 * This function reinitializes some control blocks and
6184 * enables destructive diagnostics on the adapter.
6189 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6191 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6192 volatile u32 int_reg;
6195 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6196 ipr_init_ioa_mem(ioa_cfg);
6198 ioa_cfg->allow_interrupts = 1;
6199 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6201 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6202 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6203 ioa_cfg->regs.clr_interrupt_mask_reg);
6204 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
return IPR_RC_JOB_CONTINUE;
}
6208 /* Enable destructive diagnostics on IOA */
6209 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6211 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6212 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6214 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6216 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6217 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6218 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6219 ipr_cmd->done = ipr_reset_ioa_job;
6220 add_timer(&ipr_cmd->timer);
6221 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6224 return IPR_RC_JOB_RETURN;
6228 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6229 * @ipr_cmd: ipr command struct
6231 * This function is invoked when an adapter dump has run out
6232 * of processing time.
6235 * IPR_RC_JOB_CONTINUE
6237 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6239 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6241 if (ioa_cfg->sdt_state == GET_DUMP)
6242 ioa_cfg->sdt_state = ABORT_DUMP;
6244 ipr_cmd->job_step = ipr_reset_alert;
6246 return IPR_RC_JOB_CONTINUE;
6250 * ipr_unit_check_no_data - Log a unit check/no data error log
6251 * @ioa_cfg: ioa config struct
6253 * Logs an error indicating the adapter unit checked, but for some
6254 * reason, we were unable to fetch the unit check buffer.
6259 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6261 ioa_cfg->errors_logged++;
6262 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6266 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6267 * @ioa_cfg: ioa config struct
6269 * Fetches the unit check buffer from the adapter by clocking the data
6270 * through the mailbox register.
6275 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6277 unsigned long mailbox;
6278 struct ipr_hostrcb *hostrcb;
struct ipr_uc_sdt sdt;
int rc, length;
6282 mailbox = readl(ioa_cfg->ioa_mailbox);
6284 if (!ipr_sdt_is_fmt2(mailbox)) {
ipr_unit_check_no_data(ioa_cfg);
return;
}
6289 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6290 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6291 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6293 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6294 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
ipr_unit_check_no_data(ioa_cfg);
return;
}
6299 /* Find length of the first sdt entry (UC buffer) */
6300 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6301 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6303 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6304 struct ipr_hostrcb, queue);
6305 list_del(&hostrcb->queue);
6306 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6308 rc = ipr_get_ldump_data_section(ioa_cfg,
6309 be32_to_cpu(sdt.entry[0].bar_str_offset),
6310 (__be32 *)&hostrcb->hcam,
6311 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
if (!rc)
ipr_handle_log_data(ioa_cfg, hostrcb);
else
ipr_unit_check_no_data(ioa_cfg);
6318 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6322 * ipr_reset_restore_cfg_space - Restore PCI config space.
6323 * @ipr_cmd: ipr command struct
6325 * Description: This function restores the saved PCI config space of
6326 * the adapter, fails all outstanding ops back to the callers, and
6327 * fetches the dump/unit check if applicable to this reset.
6330 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6332 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
int rc;

rc = pci_restore_state(ioa_cfg->pdev);
6340 if (rc != PCIBIOS_SUCCESSFUL) {
6341 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6342 return IPR_RC_JOB_CONTINUE;
6345 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6346 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6347 return IPR_RC_JOB_CONTINUE;
6350 ipr_fail_all_ops(ioa_cfg);
6352 if (ioa_cfg->ioa_unit_checked) {
6353 ioa_cfg->ioa_unit_checked = 0;
6354 ipr_get_unit_check_buffer(ioa_cfg);
6355 ipr_cmd->job_step = ipr_reset_alert;
6356 ipr_reset_start_timer(ipr_cmd, 0);
6357 return IPR_RC_JOB_RETURN;
6360 if (ioa_cfg->in_ioa_bringdown) {
6361 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6362 } else {
6363 ipr_cmd->job_step = ipr_reset_enable_ioa;
6365 if (GET_DUMP == ioa_cfg->sdt_state) {
6366 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6367 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6368 schedule_work(&ioa_cfg->work_q);
6369 return IPR_RC_JOB_RETURN;
6370 }
6371 }
6374 return IPR_RC_JOB_CONTINUE;
6378 * ipr_reset_bist_done - BIST has completed on the adapter.
6379 * @ipr_cmd: ipr command struct
6381 * Description: Unblock config space and resume the reset process.
6384 * IPR_RC_JOB_CONTINUE
6386 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6389 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6390 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6392 return IPR_RC_JOB_CONTINUE;
6396 * ipr_reset_start_bist - Run BIST on the adapter.
6397 * @ipr_cmd: ipr command struct
6399 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6402 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6404 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6406 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6407 int rc;
6410 pci_block_user_cfg_access(ioa_cfg->pdev);
6411 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6413 if (rc != PCIBIOS_SUCCESSFUL) {
6414 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6415 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6416 rc = IPR_RC_JOB_CONTINUE;
6417 } else {
6418 ipr_cmd->job_step = ipr_reset_bist_done;
6419 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6420 rc = IPR_RC_JOB_RETURN;
6421 }
6423 return rc;
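/*
 * Background note (PCI spec, not ipr-specific): a device must complete
 * BIST within 2 seconds of PCI_BIST_START being set, which is why this
 * step arms IPR_WAIT_FOR_BIST_TIMEOUT and returns IPR_RC_JOB_RETURN
 * instead of polling PCI_BIST inline. A generic polling sketch, for
 * contrast, would look like:
 *
 *	u8 bist;
 *	pci_read_config_byte(pdev, PCI_BIST, &bist);
 *	if (bist & PCI_BIST_CAPABLE)
 *		pci_write_config_byte(pdev, PCI_BIST,
 *				      bist | PCI_BIST_START);
 *	(then re-read PCI_BIST until PCI_BIST_START clears)
 */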
6428 * ipr_reset_allowed - Query whether or not IOA can be reset
6429 * @ioa_cfg: ioa config struct
6432 * 0 if reset not allowed / non-zero if reset is allowed
6434 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6436 volatile u32 temp_reg;
6438 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6439 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6443 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6444 * @ipr_cmd: ipr command struct
6446 * Description: This function waits for the adapter to grant permission to
6447 * run BIST, then runs BIST. If the adapter does not grant permission
6448 * within a reasonable time, we reset the adapter anyway. Resetting the
6449 * adapter without warning it risks losing the adapter's persistent error
6450 * log: if the adapter is reset while it is writing to its flash, that
6451 * flash segment will have bad ECC and be zeroed.
6455 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6457 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6460 int rc = IPR_RC_JOB_RETURN;
6462 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6463 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6464 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6465 } else {
6466 ipr_cmd->job_step = ipr_reset_start_bist;
6467 rc = IPR_RC_JOB_CONTINUE;
6468 }
6470 return rc;
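/*
 * Timing sketch (illustrative figures): u.time_left is primed with
 * IPR_WAIT_FOR_RESET_TIMEOUT by ipr_reset_alert below, so this step
 * re-arms itself every IPR_CHECK_FOR_RESET_TIMEOUT until the IOA drops
 * IPR_PCII_CRITICAL_OPERATION or the budget is exhausted. For example,
 * a 2-second budget polled every 100 ms allows at most 20 retries
 * before BIST is forced; the real values come from the macros.
 */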
6474 * ipr_reset_alert - Alert the adapter of a pending reset
6475 * @ipr_cmd: ipr command struct
6477 * Description: This function alerts the adapter that it will be reset.
6478 * If memory space is not currently enabled, proceed directly
6479 * to running BIST on the adapter. The timer must always be started
6480 * so we guarantee we do not run BIST from ipr_isr.
6485 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6487 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6488 u16 cmd_reg;
6489 int rc;
6492 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6494 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6495 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6496 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6497 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6498 } else {
6499 ipr_cmd->job_step = ipr_reset_start_bist;
6500 }
6502 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6503 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6506 return IPR_RC_JOB_RETURN;
6510 * ipr_reset_ucode_download_done - Microcode download completion
6511 * @ipr_cmd: ipr command struct
6513 * Description: This function unmaps the microcode download buffer.
6516 * IPR_RC_JOB_CONTINUE
6518 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6521 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6523 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6524 sglist->num_sg, DMA_TO_DEVICE);
6526 ipr_cmd->job_step = ipr_reset_alert;
6527 return IPR_RC_JOB_CONTINUE;
6531 * ipr_reset_ucode_download - Download microcode to the adapter
6532 * @ipr_cmd: ipr command struct
6534 * Description: This function checks whether there is microcode
6535 * to download to the adapter. If there is, a download is performed.
6538 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6540 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6543 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6546 ipr_cmd->job_step = ipr_reset_alert;
6548 if (!sglist)
6549 return IPR_RC_JOB_CONTINUE;
6551 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6552 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6553 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6554 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6555 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6556 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6557 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6559 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6560 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6562 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6563 IPR_WRITE_BUFFER_TIMEOUT);
6566 return IPR_RC_JOB_RETURN;
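/*
 * Worked example of the CDB setup above: WRITE BUFFER carries the
 * parameter list length big-endian in bytes 6-8, so a hypothetical
 * 0x123456-byte microcode image is encoded as
 *
 *	cdb[6] = 0x12;	(buffer_len & 0xff0000) >> 16
 *	cdb[7] = 0x34;	(buffer_len & 0x00ff00) >> 8
 *	cdb[8] = 0x56;	 buffer_len & 0x0000ff
 *
 * which limits a single download to 16 MB - 1.
 */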
6570 * ipr_reset_shutdown_ioa - Shutdown the adapter
6571 * @ipr_cmd: ipr command struct
6573 * Description: This function issues an adapter shutdown of the
6574 * specified type to the specified adapter as part of the
6575 * adapter reset job.
6578 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6580 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6582 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6583 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6584 unsigned long timeout;
6585 int rc = IPR_RC_JOB_CONTINUE;
6588 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6589 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6590 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6591 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6592 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6594 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6595 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6596 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6597 timeout = IPR_INTERNAL_TIMEOUT;
6599 timeout = IPR_SHUTDOWN_TIMEOUT;
6601 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6603 rc = IPR_RC_JOB_RETURN;
6604 ipr_cmd->job_step = ipr_reset_ucode_download;
6605 } else
6606 ipr_cmd->job_step = ipr_reset_alert;
6608 return rc;
6613 * ipr_reset_ioa_job - Adapter reset job
6614 * @ipr_cmd: ipr command struct
6616 * Description: This function is the job router for the adapter reset job.
6621 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6622 {
6623 u32 rc, ioasc;
6624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6626 do {
6627 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6629 if (ioa_cfg->reset_cmd != ipr_cmd) {
6630 /*
6631 * We are doing nested adapter resets and this is
6632 * not the current reset job.
6633 */
6634 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6635 return;
6636 }
6638 if (IPR_IOASC_SENSE_KEY(ioasc)) {
6639 rc = ipr_cmd->job_step_failed(ipr_cmd);
6640 if (rc == IPR_RC_JOB_RETURN)
6641 return;
6642 }
6644 ipr_reinit_ipr_cmnd(ipr_cmd);
6645 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6646 rc = ipr_cmd->job_step(ipr_cmd);
6647 } while(rc == IPR_RC_JOB_CONTINUE);
6648 }
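/*
 * Re-entry note: asynchronous steps hook back into this router by
 * setting ipr_cmd->done = ipr_reset_ioa_job before going to sleep
 * (see the transop timer setup and ipr_reset_freeze elsewhere in this
 * file), so a timer or interrupt completion resumes the state machine
 * exactly where it left off.
 */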
6651 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6652 * @ioa_cfg: ioa config struct
6653 * @job_step: first job step of reset job
6654 * @shutdown_type: shutdown type
6656 * Description: This function will initiate the reset of the given adapter
6657 * starting at the selected job step.
6658 * If the caller needs to wait on the completion of the reset,
6659 * the caller must sleep on the reset_wait_q.
6664 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6665 int (*job_step) (struct ipr_cmnd *),
6666 enum ipr_shutdown_type shutdown_type)
6668 struct ipr_cmnd *ipr_cmd;
6670 ioa_cfg->in_reset_reload = 1;
6671 ioa_cfg->allow_cmds = 0;
6672 scsi_block_requests(ioa_cfg->host);
6674 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6675 ioa_cfg->reset_cmd = ipr_cmd;
6676 ipr_cmd->job_step = job_step;
6677 ipr_cmd->u.shutdown_type = shutdown_type;
6679 ipr_reset_ioa_job(ipr_cmd);
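/*
 * Caller-side sketch, mirroring the callers in this file: take the
 * host lock, kick off the job, drop the lock, then sleep on
 * reset_wait_q until in_reset_reload clears:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
 *				IPR_SHUTDOWN_NONE);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */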
6683 * ipr_initiate_ioa_reset - Initiate an adapter reset
6684 * @ioa_cfg: ioa config struct
6685 * @shutdown_type: shutdown type
6687 * Description: This function will initiate the reset of the given adapter.
6688 * If the caller needs to wait on the completion of the reset,
6689 * the caller must sleep on the reset_wait_q.
6694 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6695 enum ipr_shutdown_type shutdown_type)
6697 if (ioa_cfg->ioa_is_dead)
6698 return;
6700 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6701 ioa_cfg->sdt_state = ABORT_DUMP;
6703 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6704 dev_err(&ioa_cfg->pdev->dev,
6705 "IOA taken offline - error recovery failed\n");
6707 ioa_cfg->reset_retries = 0;
6708 ioa_cfg->ioa_is_dead = 1;
6710 if (ioa_cfg->in_ioa_bringdown) {
6711 ioa_cfg->reset_cmd = NULL;
6712 ioa_cfg->in_reset_reload = 0;
6713 ipr_fail_all_ops(ioa_cfg);
6714 wake_up_all(&ioa_cfg->reset_wait_q);
6716 spin_unlock_irq(ioa_cfg->host->host_lock);
6717 scsi_unblock_requests(ioa_cfg->host);
6718 spin_lock_irq(ioa_cfg->host->host_lock);
6719 return;
6720 } else {
6721 ioa_cfg->in_ioa_bringdown = 1;
6722 shutdown_type = IPR_SHUTDOWN_NONE;
6723 }
6724 }
6726 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6731 * ipr_reset_freeze - Hold off all I/O activity
6732 * @ipr_cmd: ipr command struct
6734 * Description: If the PCI slot is frozen, hold off all I/O
6735 * activity; then, as soon as the slot is available again,
6736 * initiate an adapter reset.
6738 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6740 /* Disallow new interrupts, avoid loop */
6741 ipr_cmd->ioa_cfg->allow_interrupts = 0;
6742 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6743 ipr_cmd->done = ipr_reset_ioa_job;
6744 return IPR_RC_JOB_RETURN;
6748 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6749 * @pdev: PCI device struct
6751 * Description: This routine is called to tell us that the PCI bus
6752 * is down. Can't do anything here, except put the device driver
6753 * into a holding pattern, waiting for the PCI bus to come back.
6755 static void ipr_pci_frozen(struct pci_dev *pdev)
6757 unsigned long flags = 0;
6758 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6760 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6761 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6766 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6767 * @pdev: PCI device struct
6769 * Description: This routine is called by the pci error recovery
6770 * code after the PCI slot has been reset, just before we
6771 * should resume normal operations.
6773 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6775 unsigned long flags = 0;
6776 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6779 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6780 IPR_SHUTDOWN_NONE);
6781 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6782 return PCI_ERS_RESULT_RECOVERED;
6786 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6787 * @pdev: PCI device struct
6789 * Description: This routine is called when the PCI bus has
6790 * permanently failed.
6792 static void ipr_pci_perm_failure(struct pci_dev *pdev)
6794 unsigned long flags = 0;
6795 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6797 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6798 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6799 ioa_cfg->sdt_state = ABORT_DUMP;
6800 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6801 ioa_cfg->in_ioa_bringdown = 1;
6802 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6803 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6807 * ipr_pci_error_detected - Called when a PCI error is detected.
6808 * @pdev: PCI device struct
6809 * @state: PCI channel state
6811 * Description: Called when a PCI error is detected.
6814 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6816 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6817 pci_channel_state_t state)
6818 {
6819 switch (state) {
6820 case pci_channel_io_frozen:
6821 ipr_pci_frozen(pdev);
6822 return PCI_ERS_RESULT_NEED_RESET;
6823 case pci_channel_io_perm_failure:
6824 ipr_pci_perm_failure(pdev);
6825 return PCI_ERS_RESULT_DISCONNECT;
6826 default:
6827 break;
6828 }
6830 return PCI_ERS_RESULT_NEED_RESET;
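/*
 * Recovery flow summary (handlers are wired up in ipr_err_handler near
 * the bottom of this file): on a bus error the PCI core calls
 * ipr_pci_error_detected, which freezes I/O via ipr_reset_freeze; once
 * the slot has been reset it calls ipr_pci_slot_reset, which restarts
 * the reset job at ipr_reset_restore_cfg_space to bring the IOA back
 * to an operational state.
 */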
6834 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6835 * @ioa_cfg: ioa cfg struct
6837 * Description: This is the second phase of adapter initialization.
6838 * This function takes care of initializing the adapter to the point
6839 * where it can accept new commands.
6842 * 0 on success / -EIO on failure
6844 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6846 int rc = 0;
6847 unsigned long host_lock_flags = 0;
6850 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6851 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6852 if (ioa_cfg->needs_hard_reset) {
6853 ioa_cfg->needs_hard_reset = 0;
6854 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6855 } else
6856 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
6857 IPR_SHUTDOWN_NONE);
6859 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6860 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6861 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6863 if (ioa_cfg->ioa_is_dead) {
6864 rc = -EIO;
6865 } else if (ipr_invalid_adapter(ioa_cfg)) {
6866 if (!ipr_testmode)
6867 rc = -EIO;
6869 dev_err(&ioa_cfg->pdev->dev,
6870 "Adapter not supported in this hardware configuration.\n");
6873 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6875 return rc;
6880 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6881 * @ioa_cfg: ioa config struct
6886 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6887 {
6888 int i;
6890 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6891 if (ioa_cfg->ipr_cmnd_list[i])
6892 pci_pool_free(ioa_cfg->ipr_cmd_pool,
6893 ioa_cfg->ipr_cmnd_list[i],
6894 ioa_cfg->ipr_cmnd_list_dma[i]);
6896 ioa_cfg->ipr_cmnd_list[i] = NULL;
6899 if (ioa_cfg->ipr_cmd_pool)
6900 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6902 ioa_cfg->ipr_cmd_pool = NULL;
6906 * ipr_free_mem - Frees memory allocated for an adapter
6907 * @ioa_cfg: ioa cfg struct
6912 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6913 {
6914 int i;
6916 kfree(ioa_cfg->res_entries);
6917 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6918 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6919 ipr_free_cmd_blks(ioa_cfg);
6920 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6921 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6922 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6924 ioa_cfg->cfg_table_dma);
6926 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6927 pci_free_consistent(ioa_cfg->pdev,
6928 sizeof(struct ipr_hostrcb),
6929 ioa_cfg->hostrcb[i],
6930 ioa_cfg->hostrcb_dma[i]);
6933 ipr_free_dump(ioa_cfg);
6934 kfree(ioa_cfg->trace);
6938 * ipr_free_all_resources - Free all allocated resources for an adapter.
6939 * @ioa_cfg: ioa config struct
6941 * This function frees all allocated resources for the
6942 * specified adapter.
6947 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6949 struct pci_dev *pdev = ioa_cfg->pdev;
6952 free_irq(pdev->irq, ioa_cfg);
6953 iounmap(ioa_cfg->hdw_dma_regs);
6954 pci_release_regions(pdev);
6955 ipr_free_mem(ioa_cfg);
6956 scsi_host_put(ioa_cfg->host);
6957 pci_disable_device(pdev);
6962 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6963 * @ioa_cfg: ioa config struct
6966 * 0 on success / -ENOMEM on allocation failure
6968 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6970 struct ipr_cmnd *ipr_cmd;
6971 struct ipr_ioarcb *ioarcb;
6972 dma_addr_t dma_addr;
6973 int i;
6975 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6976 sizeof(struct ipr_cmnd), 8, 0);
6978 if (!ioa_cfg->ipr_cmd_pool)
6979 return -ENOMEM;
6981 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6982 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
6984 if (!ipr_cmd) {
6985 ipr_free_cmd_blks(ioa_cfg);
6986 return -ENOMEM;
6987 }
6989 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6990 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6991 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6993 ioarcb = &ipr_cmd->ioarcb;
6994 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6995 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6996 ioarcb->write_ioadl_addr =
6997 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6998 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6999 ioarcb->ioasa_host_pci_addr =
7000 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7001 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7002 ipr_cmd->cmd_index = i;
7003 ipr_cmd->ioa_cfg = ioa_cfg;
7004 ipr_cmd->sense_buffer_dma = dma_addr +
7005 offsetof(struct ipr_cmnd, sense_buffer);
7007 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7008 }
7010 return 0;
7011 }
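/*
 * Layout sketch: each pool object is a single struct ipr_cmnd, and all
 * hardware-visible pieces are carved out of it by offset, so one
 * dma_addr describes everything (exact offsets depend on ipr.h):
 *
 *	dma_addr + 0                                       -> ioarcb
 *	dma_addr + offsetof(struct ipr_cmnd, ioadl)        -> IOADL
 *	dma_addr + offsetof(struct ipr_cmnd, ioasa)        -> IOASA
 *	dma_addr + offsetof(struct ipr_cmnd, sense_buffer) -> sense data
 *
 * No per-field DMA mapping is needed.
 */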
7014 * ipr_alloc_mem - Allocate memory for an adapter
7015 * @ioa_cfg: ioa config struct
7018 * 0 on success / non-zero for error
7020 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7022 struct pci_dev *pdev = ioa_cfg->pdev;
7023 int i, rc = -ENOMEM;
7026 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7027 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7029 if (!ioa_cfg->res_entries)
7030 goto out;
7032 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7033 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7035 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7036 sizeof(struct ipr_misc_cbs),
7037 &ioa_cfg->vpd_cbs_dma);
7039 if (!ioa_cfg->vpd_cbs)
7040 goto out_free_res_entries;
7042 if (ipr_alloc_cmd_blks(ioa_cfg))
7043 goto out_free_vpd_cbs;
7045 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7046 sizeof(u32) * IPR_NUM_CMD_BLKS,
7047 &ioa_cfg->host_rrq_dma);
7049 if (!ioa_cfg->host_rrq)
7050 goto out_ipr_free_cmd_blocks;
7052 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7053 sizeof(struct ipr_config_table),
7054 &ioa_cfg->cfg_table_dma);
7056 if (!ioa_cfg->cfg_table)
7057 goto out_free_host_rrq;
7059 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7060 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7061 sizeof(struct ipr_hostrcb),
7062 &ioa_cfg->hostrcb_dma[i]);
7064 if (!ioa_cfg->hostrcb[i])
7065 goto out_free_hostrcb_dma;
7067 ioa_cfg->hostrcb[i]->hostrcb_dma =
7068 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7069 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7070 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7073 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7074 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7076 if (!ioa_cfg->trace)
7077 goto out_free_hostrcb_dma;
7079 rc = 0;
7080 out:
7081 return rc;
7084 out_free_hostrcb_dma:
7085 while (i-- > 0)
7086 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7087 ioa_cfg->hostrcb[i],
7088 ioa_cfg->hostrcb_dma[i]);
7090 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7091 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7092 out_free_host_rrq:
7093 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7094 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7095 out_ipr_free_cmd_blocks:
7096 ipr_free_cmd_blks(ioa_cfg);
7097 out_free_vpd_cbs:
7098 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7099 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7100 out_free_res_entries:
7101 kfree(ioa_cfg->res_entries);
7102 goto out;
7103 }
7106 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7107 * @ioa_cfg: ioa config struct
7112 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7113 {
7114 int i;
7116 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7117 ioa_cfg->bus_attr[i].bus = i;
7118 ioa_cfg->bus_attr[i].qas_enabled = 0;
7119 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7120 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7121 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7122 else
7123 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
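/*
 * Usage sketch (illustrative): ipr_max_speed can be chosen when the
 * module is loaded, e.g.
 *
 *	modprobe ipr ipr_max_speed=1
 *
 * Any value outside the ipr_max_bus_speeds table falls back to
 * IPR_U160_SCSI_RATE above.
 */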
7128 * ipr_init_ioa_cfg - Initialize IOA config struct
7129 * @ioa_cfg: ioa config struct
7130 * @host: scsi host struct
7131 * @pdev: PCI dev struct
7136 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7137 struct Scsi_Host *host, struct pci_dev *pdev)
7139 const struct ipr_interrupt_offsets *p;
7140 struct ipr_interrupts *t;
7141 void __iomem *base;
7143 ioa_cfg->host = host;
7144 ioa_cfg->pdev = pdev;
7145 ioa_cfg->log_level = ipr_log_level;
7146 ioa_cfg->doorbell = IPR_DOORBELL;
7147 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7148 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7149 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7150 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7151 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7152 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7153 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7154 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7156 INIT_LIST_HEAD(&ioa_cfg->free_q);
7157 INIT_LIST_HEAD(&ioa_cfg->pending_q);
7158 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7159 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7160 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7161 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7162 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7163 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7164 ioa_cfg->sdt_state = INACTIVE;
7165 if (ipr_enable_cache)
7166 ioa_cfg->cache_state = CACHE_ENABLED;
7167 else
7168 ioa_cfg->cache_state = CACHE_DISABLED;
7170 ipr_initialize_bus_attr(ioa_cfg);
7172 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7173 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7174 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7175 host->unique_id = host->host_no;
7176 host->max_cmd_len = IPR_MAX_CDB_LEN;
7177 pci_set_drvdata(pdev, ioa_cfg);
7179 p = &ioa_cfg->chip_cfg->regs;
7180 t = &ioa_cfg->regs;
7181 base = ioa_cfg->hdw_dma_regs;
7183 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7184 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7185 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7186 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7187 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7188 t->ioarrin_reg = base + p->ioarrin_reg;
7189 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7190 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7191 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7195 * ipr_get_chip_cfg - Find adapter chip configuration
7196 * @dev_id: PCI device id struct
7199 * ptr to chip config on success / NULL on failure
7201 static const struct ipr_chip_cfg_t * __devinit
7202 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7203 {
7204 int i;
7206 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7207 if (ipr_chip[i].vendor == dev_id->vendor &&
7208 ipr_chip[i].device == dev_id->device)
7209 return ipr_chip[i].cfg;
7211 return NULL;
7214 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7215 * @pdev: PCI device struct
7216 * @dev_id: PCI device id struct
7219 * 0 on success / non-zero on failure
7221 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7222 const struct pci_device_id *dev_id)
7224 struct ipr_ioa_cfg *ioa_cfg;
7225 struct Scsi_Host *host;
7226 unsigned long ipr_regs_pci;
7227 void __iomem *ipr_regs;
7228 int rc = PCIBIOS_SUCCESSFUL;
7229 volatile u32 mask, uproc;
7233 if ((rc = pci_enable_device(pdev))) {
7234 dev_err(&pdev->dev, "Cannot enable adapter\n");
7238 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7240 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7243 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7248 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7249 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7250 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7251 sata_port_info.flags, &ipr_sata_ops);
7253 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7255 if (!ioa_cfg->chip_cfg) {
7256 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7257 dev_id->vendor, dev_id->device);
7258 goto out_scsi_host_put;
7261 if (ipr_transop_timeout)
7262 ioa_cfg->transop_timeout = ipr_transop_timeout;
7263 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7264 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7265 else
7266 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7268 ipr_regs_pci = pci_resource_start(pdev, 0);
7270 rc = pci_request_regions(pdev, IPR_NAME);
7271 if (rc < 0) {
7272 dev_err(&pdev->dev,
7273 "Couldn't register memory range of registers\n");
7274 goto out_scsi_host_put;
7275 }
7277 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7279 if (!ipr_regs) {
7280 dev_err(&pdev->dev,
7281 "Couldn't map memory range of registers\n");
7282 rc = -ENOMEM;
7283 goto out_release_regions;
7284 }
7286 ioa_cfg->hdw_dma_regs = ipr_regs;
7287 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7288 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7290 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7292 pci_set_master(pdev);
7294 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7296 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7300 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7301 ioa_cfg->chip_cfg->cache_line_size);
7303 if (rc != PCIBIOS_SUCCESSFUL) {
7304 dev_err(&pdev->dev, "Write of cache line size failed\n");
7309 /* Save away PCI config space for use following IOA reset */
7310 rc = pci_save_state(pdev);
7312 if (rc != PCIBIOS_SUCCESSFUL) {
7313 dev_err(&pdev->dev, "Failed to save PCI config space\n");
7318 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7319 goto cleanup_nomem;
7321 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7322 goto cleanup_nomem;
7324 rc = ipr_alloc_mem(ioa_cfg);
7327 "Couldn't allocate enough memory for device driver!\n");
7331 /*
7332 * If HRRQ updated interrupt is not masked, or reset alert is set,
7333 * the card is in an unknown state and needs a hard reset
7334 */
7335 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7336 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7337 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7338 ioa_cfg->needs_hard_reset = 1;
7340 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7341 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7344 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7349 spin_lock(&ipr_driver_lock);
7350 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7351 spin_unlock(&ipr_driver_lock);
7353 out:
7354 return rc;
7357 cleanup_nolog:
7358 ipr_free_mem(ioa_cfg);
7359 cleanup_nomem:
7360 iounmap(ipr_regs);
7361 out_release_regions:
7362 pci_release_regions(pdev);
7363 out_scsi_host_put:
7364 scsi_host_put(host);
7365 out_disable:
7366 pci_disable_device(pdev);
7367 goto out;
7371 * ipr_scan_vsets - Scans for VSET devices
7372 * @ioa_cfg: ioa config struct
7374 * Description: Since the VSET resources do not follow SAM in that we can have
7375 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7380 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7381 {
7382 int target, lun;
7384 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7385 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7386 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7390 * ipr_initiate_ioa_bringdown - Bring down an adapter
7391 * @ioa_cfg: ioa config struct
7392 * @shutdown_type: shutdown type
7394 * Description: This function will initiate bringing down the adapter.
7395 * This consists of issuing an IOA shutdown to the adapter
7396 * to flush the cache, and running BIST.
7397 * If the caller needs to wait on the completion of the reset,
7398 * the caller must sleep on the reset_wait_q.
7403 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7404 enum ipr_shutdown_type shutdown_type)
7407 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7408 ioa_cfg->sdt_state = ABORT_DUMP;
7409 ioa_cfg->reset_retries = 0;
7410 ioa_cfg->in_ioa_bringdown = 1;
7411 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7416 * __ipr_remove - Remove a single adapter
7417 * @pdev: pci device struct
7419 * Adapter hot plug remove entry point.
7424 static void __ipr_remove(struct pci_dev *pdev)
7426 unsigned long host_lock_flags = 0;
7427 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7430 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7431 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7434 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7435 flush_scheduled_work();
7436 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7438 spin_lock(&ipr_driver_lock);
7439 list_del(&ioa_cfg->queue);
7440 spin_unlock(&ipr_driver_lock);
7442 if (ioa_cfg->sdt_state == ABORT_DUMP)
7443 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7446 ipr_free_all_resources(ioa_cfg);
7452 * ipr_remove - IOA hot plug remove entry point
7453 * @pdev: pci device struct
7455 * Adapter hot plug remove entry point.
7460 static void ipr_remove(struct pci_dev *pdev)
7462 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7466 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7467 &ipr_trace_attr);
7468 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7469 &ipr_dump_attr);
7470 scsi_remove_host(ioa_cfg->host);
7472 __ipr_remove(pdev);
7478 * ipr_probe - Adapter hot plug add entry point
7481 * 0 on success / non-zero on failure
7483 static int __devinit ipr_probe(struct pci_dev *pdev,
7484 const struct pci_device_id *dev_id)
7486 struct ipr_ioa_cfg *ioa_cfg;
7487 int rc;
7489 rc = ipr_probe_ioa(pdev, dev_id);
7491 if (rc)
7492 return rc;
7494 ioa_cfg = pci_get_drvdata(pdev);
7495 rc = ipr_probe_ioa_part2(ioa_cfg);
7497 if (rc) {
7498 __ipr_remove(pdev);
7499 return rc;
7500 }
7502 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7504 if (rc) {
7505 __ipr_remove(pdev);
7506 return rc;
7507 }
7509 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7510 &ipr_trace_attr);
7512 if (rc) {
7513 scsi_remove_host(ioa_cfg->host);
7514 __ipr_remove(pdev);
7515 return rc;
7516 }
7518 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7519 &ipr_dump_attr);
7521 if (rc) {
7522 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7523 &ipr_trace_attr);
7524 scsi_remove_host(ioa_cfg->host);
7525 __ipr_remove(pdev);
7526 return rc;
7527 }
7529 scsi_scan_host(ioa_cfg->host);
7530 ipr_scan_vsets(ioa_cfg);
7531 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7532 ioa_cfg->allow_ml_add_del = 1;
7533 ioa_cfg->host->max_channel = IPR_VSET_BUS;
7534 schedule_work(&ioa_cfg->work_q);
7535 return 0;
7536 }
7539 * ipr_shutdown - Shutdown handler.
7540 * @pdev: pci device struct
7542 * This function is invoked upon system shutdown/reboot. It will issue
7543 * an adapter shutdown to the adapter to flush the write cache.
7548 static void ipr_shutdown(struct pci_dev *pdev)
7550 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7551 unsigned long lock_flags = 0;
7553 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7554 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7556 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7559 static struct pci_device_id ipr_pci_table[] __devinitdata = {
7560 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7561 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7562 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7563 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7564 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7566 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7567 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7568 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7569 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7570 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7571 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7572 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7573 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7574 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7575 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7576 IPR_USE_LONG_TRANSOP_TIMEOUT },
7577 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7578 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7579 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7580 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7581 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7582 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7583 IPR_USE_LONG_TRANSOP_TIMEOUT },
7584 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7585 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7586 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7587 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7588 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7589 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7590 IPR_USE_LONG_TRANSOP_TIMEOUT },
7591 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7592 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
7593 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7594 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7595 IPR_USE_LONG_TRANSOP_TIMEOUT },
7596 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7597 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7598 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7599 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7600 IPR_USE_LONG_TRANSOP_TIMEOUT },
7601 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7602 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7603 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7604 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7605 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7606 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7607 IPR_USE_LONG_TRANSOP_TIMEOUT },
7608 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7609 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7610 IPR_USE_LONG_TRANSOP_TIMEOUT },
7611 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7612 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7613 IPR_USE_LONG_TRANSOP_TIMEOUT },
7616 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7618 static struct pci_error_handlers ipr_err_handler = {
7619 .error_detected = ipr_pci_error_detected,
7620 .slot_reset = ipr_pci_slot_reset,
7623 static struct pci_driver ipr_driver = {
7624 .name = IPR_NAME,
7625 .id_table = ipr_pci_table,
7626 .probe = ipr_probe,
7627 .remove = ipr_remove,
7628 .shutdown = ipr_shutdown,
7629 .err_handler = &ipr_err_handler,
7633 * ipr_init - Module entry point
7636 * 0 on success / negative value on failure
7638 static int __init ipr_init(void)
7640 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7641 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7643 return pci_register_driver(&ipr_driver);
7647 * ipr_exit - Module unload
7649 * Module unload entry point.
7654 static void __exit ipr_exit(void)
7656 pci_unregister_driver(&ipr_driver);
7659 module_init(ipr_init);
7660 module_exit(ipr_exit);