2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/ioport.h>
63 #include <linux/delay.h>
64 #include <linux/pci.h>
65 #include <linux/wait.h>
66 #include <linux/spinlock.h>
67 #include <linux/sched.h>
68 #include <linux/interrupt.h>
69 #include <linux/blkdev.h>
70 #include <linux/firmware.h>
71 #include <linux/module.h>
72 #include <linux/moduleparam.h>
73 #include <linux/libata.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
/* Driver-global state and backing variables for the module parameters below. */
87 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89 static unsigned int ipr_max_speed = 1; /* 1 = U160 (see max_speed param desc) */
90 static int ipr_testmode = 0;
91 static unsigned int ipr_fastfail = 0;
92 static unsigned int ipr_transop_timeout = 0;
93 static unsigned int ipr_enable_cache = 1;
94 static unsigned int ipr_debug = 0;
95 static DEFINE_SPINLOCK(ipr_driver_lock); /* presumably guards ipr_ioa_head — TODO confirm */
97 /* This table describes the differences between DMA controller chips */
/* Values are register offsets into the adapter's MMIO space — assumed
 * relative to the mapped register base; confirm against ipr.h. */
98 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
99 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
101 .cache_line_size = 0x20,
103 .set_interrupt_mask_reg = 0x0022C,
104 .clr_interrupt_mask_reg = 0x00230,
105 .sense_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_reg = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .ioarrin_reg = 0x00404,
109 .sense_uproc_interrupt_reg = 0x00214,
110 .set_uproc_interrupt_reg = 0x00214,
111 .clr_uproc_interrupt_reg = 0x00218
114 { /* Snipe and Scamp */
116 .cache_line_size = 0x20,
118 .set_interrupt_mask_reg = 0x00288,
119 .clr_interrupt_mask_reg = 0x0028C,
120 .sense_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_reg = 0x00284,
122 .sense_interrupt_reg = 0x00280,
123 .ioarrin_reg = 0x00504,
124 .sense_uproc_interrupt_reg = 0x00290,
125 .set_uproc_interrupt_reg = 0x00290,
126 .clr_uproc_interrupt_reg = 0x00294
/* Maps each supported PCI vendor/device ID pair to its chip register
 * layout in ipr_chip_cfg[] (index 0: Gemstone family, 1: Snipe/Scamp). */
131 static const struct ipr_chip_t ipr_chip[] = {
132 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
134 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
138 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
/* Indexed by the max_speed module parameter: 0=80 MB/s, 1=U160, 2=U320. */
141 static int ipr_max_bus_speeds [] = {
142 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/* Module metadata and user-tunable load-time parameters (all perm 0:
 * not visible/writable through sysfs after load). */
145 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
146 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
147 module_param_named(max_speed, ipr_max_speed, uint, 0);
148 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
149 module_param_named(log_level, ipr_log_level, uint, 0);
150 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
151 module_param_named(testmode, ipr_testmode, int, 0);
152 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
153 module_param_named(fastfail, ipr_fastfail, int, 0);
154 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
155 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
156 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
157 module_param_named(enable_cache, ipr_enable_cache, int, 0);
158 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
159 module_param_named(debug, ipr_debug, int, 0);
160 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
161 MODULE_LICENSE("GPL");
162 MODULE_VERSION(IPR_DRIVER_VERSION);
164 /* A constant array of IOASCs/URCs/Error Messages */
/* Entry layout appears to be { IOASC, flag, log level, message }; the
 * second field's exact meaning is declared in struct ipr_error_table_t
 * elsewhere in the driver — TODO confirm (looks like an error-class or
 * logging flag). Messages beginning with a 4-hex-digit code carry the
 * Unit Reference Code (URC) reported to the user. */
166 struct ipr_error_table_t ipr_error_table[] = {
167 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
168 "8155: An unknown error was received"},
170 "Soft underlength error"},
172 "Command to be cancelled not found"},
174 "Qualified success"},
175 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
176 "FFFE: Soft device bus error recovered by the IOA"},
177 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
178 "4101: Soft device bus fabric error"},
179 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
180 "FFF9: Device sector reassign successful"},
181 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
182 "FFF7: Media error recovered by device rewrite procedures"},
183 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
184 "7001: IOA sector reassignment successful"},
185 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
186 "FFF9: Soft media error. Sector reassignment recommended"},
187 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
188 "FFF7: Media error recovered by IOA rewrite procedures"},
189 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
190 "FF3D: Soft PCI bus error recovered by the IOA"},
191 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
192 "FFF6: Device hardware error recovered by the IOA"},
193 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
194 "FFF6: Device hardware error recovered by the device"},
195 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
196 "FF3D: Soft IOA error recovered by the IOA"},
197 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
198 "FFFA: Undefined device response recovered by the IOA"},
199 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
200 "FFF6: Device bus error, message or command phase"},
201 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
202 "FFFE: Task Management Function failed"},
203 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
204 "FFF6: Failure prediction threshold exceeded"},
205 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
206 "8009: Impending cache battery pack failure"},
208 "34FF: Disk device format in progress"},
209 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
210 "9070: IOA requested reset"},
212 "Synchronization required"},
214 "No ready, IOA shutdown"},
216 "Not ready, IOA has been shutdown"},
217 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
218 "3020: Storage subsystem configuration error"},
220 "FFF5: Medium error, data unreadable, recommend reassign"},
222 "7000: Medium error, data unreadable, do not reassign"},
223 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
224 "FFF3: Disk media format bad"},
225 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
226 "3002: Addressed device failed to respond to selection"},
227 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
228 "3100: Device bus error"},
229 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
230 "3109: IOA timed out a device command"},
232 "3120: SCSI bus is not operational"},
233 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
234 "4100: Hard device bus fabric error"},
235 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
236 "9000: IOA reserved area data check"},
237 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
238 "9001: IOA reserved area invalid data pattern"},
239 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
240 "9002: IOA reserved area LRC error"},
241 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
242 "102E: Out of alternate sectors for disk storage"},
243 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
244 "FFF4: Data transfer underlength error"},
245 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
246 "FFF4: Data transfer overlength error"},
247 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
248 "3400: Logical unit failure"},
249 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
250 "FFF4: Device microcode is corrupt"},
251 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
252 "8150: PCI bus error"},
254 "Unsupported device bus message received"},
255 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
256 "FFF4: Disk device problem"},
257 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
258 "8150: Permanent IOA failure"},
259 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
260 "3010: Disk device returned wrong response to IOA"},
261 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
262 "8151: IOA microcode error"},
264 "Device bus status error"},
265 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
266 "8157: IOA error requiring IOA reset to recover"},
268 "ATA device status error"},
270 "Message reject received from the device"},
271 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
272 "8008: A permanent cache battery pack failure occurred"},
273 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
274 "9090: Disk unit has been modified after the last known status"},
275 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
276 "9081: IOA detected device error"},
277 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
278 "9082: IOA detected device error"},
279 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
280 "3110: Device bus error, message or command phase"},
281 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
282 "3110: SAS Command / Task Management Function failed"},
283 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
284 "9091: Incorrect hardware configuration change has been detected"},
285 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
286 "9073: Invalid multi-adapter configuration"},
287 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
288 "4010: Incorrect connection between cascaded expanders"},
289 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
290 "4020: Connections exceed IOA design limits"},
291 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
292 "4030: Incorrect multipath connection"},
293 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
294 "4110: Unsupported enclosure function"},
295 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
296 "FFF4: Command to logical unit failed"},
298 "Illegal request, invalid request type or request packet"},
300 "Illegal request, invalid resource handle"},
302 "Illegal request, commands not allowed to this device"},
304 "Illegal request, command not allowed to a secondary adapter"},
306 "Illegal request, invalid field in parameter list"},
308 "Illegal request, parameter not supported"},
310 "Illegal request, parameter value invalid"},
312 "Illegal request, command sequence error"},
314 "Illegal request, dual adapter support not enabled"},
315 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
316 "9031: Array protection temporarily suspended, protection resuming"},
317 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
318 "9040: Array protection temporarily suspended, protection resuming"},
319 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
320 "3140: Device bus not ready to ready transition"},
321 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
322 "FFFB: SCSI bus was reset"},
324 "FFFE: SCSI bus transition to single ended"},
326 "FFFE: SCSI bus transition to LVD"},
327 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
328 "FFFB: SCSI bus was reset by another initiator"},
329 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
330 "3029: A device replacement has occurred"},
331 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
332 "9051: IOA cache data exists for a missing or failed device"},
333 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
334 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
335 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
336 "9025: Disk unit is not supported at its physical location"},
337 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
338 "3020: IOA detected a SCSI bus configuration error"},
339 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
340 "3150: SCSI bus configuration error"},
341 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
342 "9074: Asymmetric advanced function disk configuration"},
343 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
344 "4040: Incomplete multipath connection between IOA and enclosure"},
345 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
346 "4041: Incomplete multipath connection between enclosure and device"},
347 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
348 "9075: Incomplete multipath connection between IOA and remote IOA"},
349 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
350 "9076: Configuration error, missing remote IOA"},
351 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
352 "4050: Enclosure does not support a required multipath function"},
353 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
354 "9041: Array protection temporarily suspended"},
355 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
356 "9042: Corrupt array parity detected on specified device"},
357 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
358 "9030: Array no longer protected due to missing or failed disk unit"},
359 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
360 "9071: Link operational transition"},
361 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
362 "9072: Link not operational transition"},
363 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
364 "9032: Array exposed but still protected"},
365 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
366 "70DD: Device forced failed by disrupt device command"},
367 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
368 "4061: Multipath redundancy level got better"},
369 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
370 "4060: Multipath redundancy level got worse"},
372 "Failure due to other device"},
373 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
374 "9008: IOA does not support functions expected by devices"},
375 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
376 "9010: Cache data associated with attached devices cannot be found"},
377 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
378 "9011: Cache data belongs to devices other than those attached"},
379 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
380 "9020: Array missing 2 or more devices with only 1 device present"},
381 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
382 "9021: Array missing 2 or more devices with 2 or more devices present"},
383 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
384 "9022: Exposed array is missing a required device"},
385 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
386 "9023: Array member(s) not at required physical locations"},
387 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
388 "9024: Array not functional due to present hardware configuration"},
389 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
390 "9026: Array not functional due to present hardware configuration"},
391 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
392 "9027: Array is missing a device and parity is out of sync"},
393 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
394 "9028: Maximum number of arrays already exist"},
395 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
396 "9050: Required cache data cannot be located for a disk unit"},
397 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
398 "9052: Cache data exists for a device that has been modified"},
399 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
400 "9054: IOA resources not available due to previous problems"},
401 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
402 "9092: Disk unit requires initialization before use"},
403 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
404 "9029: Incorrect hardware configuration change has been detected"},
405 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
406 "9060: One or more disk pairs are missing from an array"},
407 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
408 "9061: One or more disks are missing from an array"},
409 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
410 "9062: One or more disks are missing from an array"},
411 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
412 "9063: Maximum number of functional arrays has been exceeded"},
414 "Aborted command, invalid descriptor"},
416 "Command terminated by host"}
/* Known SES enclosures: product ID, a per-character compare mask
 * ('X' positions presumably must match, '*' wildcards — TODO confirm
 * against the table's lookup code), and the max bus speed in MB/s. */
419 static const struct ipr_ses_table_entry ipr_ses_table[] = {
420 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
421 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
422 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
423 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
424 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
425 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
426 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
427 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
428 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
429 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
430 { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
431 { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
432 { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
436 *  Function Prototypes
/* Forward declarations — these functions are referenced before their
 * definitions appear later in this file. */
438 static int ipr_reset_alert(struct ipr_cmnd *);
439 static void ipr_process_ccn(struct ipr_cmnd *);
440 static void ipr_process_error(struct ipr_cmnd *);
441 static void ipr_reset_ioa_job(struct ipr_cmnd *);
442 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
443 enum ipr_shutdown_type);
445 #ifdef CONFIG_SCSI_IPR_TRACE
447 * ipr_trc_hook - Add a trace entry to the driver trace
448 * @ipr_cmd:	ipr command struct
450 * @add_data:	additional data
/* @type: trace entry type (start/finish markers — see IPR_TRACE_* callers) */
455 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
456 u8 type, u32 add_data)
458 struct ipr_trace_entry *trace_entry;
459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Claim the next slot in the adapter trace buffer and record the op.
 * No wrap/bound of trace_index is visible here — TODO confirm the index
 * wraps elsewhere or the buffer is sized to cover it. */
461 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
462 trace_entry->time = jiffies;
463 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
464 trace_entry->type = type;
465 trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
466 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
467 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
468 trace_entry->u.add_data = add_data;
/* Tracing disabled in config: compile the hook away to a no-op. */
471 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
475 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
476 * @ipr_cmd:	ipr command struct
481 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
483 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
484 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
485 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
/* Wipe the command packet and reset all data-transfer bookkeeping. */
487 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
488 ioarcb->write_data_transfer_length = 0;
489 ioarcb->read_data_transfer_length = 0;
490 ioarcb->write_ioadl_len = 0;
491 ioarcb->read_ioadl_len = 0;
/* Point both IOADL pointers back at this command block's embedded ioadl
 * array (DMA address of the block + offset of the ioadl member). */
492 ioarcb->write_ioadl_addr =
493 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
494 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
496 ioasa->residual_data_len = 0;
497 ioasa->u.gata.status = 0;
499 ipr_cmd->scsi_cmd = NULL;
501 ipr_cmd->sense_buffer[0] = 0;
502 ipr_cmd->dma_use_sg = 0;
506 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
507 * @ipr_cmd:	ipr command struct
512 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
/* Full (re)initialization: reuse the reinit path, then clear the
 * driver-private scratch state and set up the per-command timer. */
514 ipr_reinit_ipr_cmnd(ipr_cmd);
515 ipr_cmd->u.scratch = 0;
516 ipr_cmd->sibling = NULL;
517 init_timer(&ipr_cmd->timer);
521 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
522 * @ioa_cfg:	ioa config struct
525 * 	pointer to ipr command struct
528 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
530 struct ipr_cmnd *ipr_cmd;
/* Pop the first command block off the free queue and initialize it.
 * Assumes free_q is non-empty — TODO confirm callers guarantee this. */
532 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
533 list_del(&ipr_cmd->queue);
534 ipr_init_ipr_cmnd(ipr_cmd);
540 * ipr_unmap_sglist - Unmap scatterlist if mapped
541 * @ioa_cfg:	ioa config struct
542 * @ipr_cmd:	ipr command struct
547 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
548 struct ipr_cmnd *ipr_cmd)
550 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
/* Only commands that mapped DMA (dma_use_sg set) need unmapping. */
552 if (ipr_cmd->dma_use_sg) {
/* Scatter/gather vs. single-buffer commands use different unmap calls. */
553 if (scsi_cmd->use_sg > 0) {
554 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
556 scsi_cmd->sc_data_direction);
558 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
559 scsi_cmd->request_bufflen,
560 scsi_cmd->sc_data_direction);
566 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
567 * @ioa_cfg:	ioa config struct
568 * @clr_ints:     interrupts to clear
570 * This function masks all interrupts on the adapter, then clears the
571 * interrupts specified in the mask
576 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
579 volatile u32 int_reg;
581 /* Stop new interrupts */
582 ioa_cfg->allow_interrupts = 0;
584 /* Set interrupt mask to stop all new interrupts */
585 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
587 /* Clear any pending interrupts */
588 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
/* Read back — presumably to flush the posted MMIO writes; confirm. */
589 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
593 * ipr_save_pcix_cmd_reg - Save PCI-X command register
594 * @ioa_cfg:	ioa config struct
597 * 	0 on success / -EIO on failure
599 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
601 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* Device has no PCI-X capability: nothing to save. */
603 if (pcix_cmd_reg == 0)
606 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
607 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
608 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
/* Force data-parity-error recovery and relaxed ordering on in the
 * saved value so they are enabled when the register is restored. */
612 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
617 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
618 * @ioa_cfg:	ioa config struct
621 * 	0 on success / -EIO on failure
623 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
625 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* Write back the value captured by ipr_save_pcix_cmd_reg(). */
628 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
629 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
630 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
639 * ipr_sata_eh_done - done function for aborted SATA commands
640 * @ipr_cmd:	ipr command struct
642 * This function is invoked for ops generated to SATA
643 * devices which are being aborted.
648 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
651 struct ata_queued_cmd *qc = ipr_cmd->qc;
652 struct ipr_sata_port *sata_port = qc->ap->private_data;
/* Mark the libata command as failed and return the block to free_q. */
654 qc->err_mask |= AC_ERR_OTHER;
655 sata_port->ioasa.status |= ATA_BUSY;
656 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
661 * ipr_scsi_eh_done - mid-layer done function for aborted ops
662 * @ipr_cmd:	ipr command struct
664 * This function is invoked by the interrupt handler for
665 * ops generated by the SCSI mid-layer which are being aborted.
670 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
673 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
/* Fail the command back to the mid-layer, unmap its DMA, complete it,
 * then return the command block to the free queue. */
675 scsi_cmd->result |= (DID_ERROR << 16);
677 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
678 scsi_cmd->scsi_done(scsi_cmd);
679 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
683 * ipr_fail_all_ops - Fails all outstanding ops.
684 * @ioa_cfg:	ioa config struct
686 * This function fails all outstanding ops.
691 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
693 struct ipr_cmnd *ipr_cmd, *temp;
/* _safe iteration: each done() call may requeue the command block. */
696 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
697 list_del(&ipr_cmd->queue);
/* Fake an "IOA was reset" completion status from the driver itself. */
699 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
700 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
/* Route the completion through the SCSI or SATA abort path as
 * appropriate for the op's origin. */
702 if (ipr_cmd->scsi_cmd)
703 ipr_cmd->done = ipr_scsi_eh_done;
704 else if (ipr_cmd->qc)
705 ipr_cmd->done = ipr_sata_eh_done;
707 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
708 del_timer(&ipr_cmd->timer);
709 ipr_cmd->done(ipr_cmd);
716 * ipr_do_req -  Send driver initiated requests.
717 * @ipr_cmd:	ipr command struct
718 * @done:		done function
719 * @timeout_func:	timeout function
720 * @timeout:	timeout value
722 * This function sends the specified command to the adapter with the
723 * timeout given. The done function is invoked on command completion.
728 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
729 void (*done) (struct ipr_cmnd *),
730 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
732 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
734 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
736 ipr_cmd->done = done;
/* Arm the per-command timeout before handing the op to the adapter. */
738 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
739 ipr_cmd->timer.expires = jiffies + timeout;
740 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
742 add_timer(&ipr_cmd->timer);
744 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
/* Writing the IOARCB's DMA address to IOARRIN hands the op to the IOA. */
747 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
748 ioa_cfg->regs.ioarrin_reg);
752 * ipr_internal_cmd_done - Op done function for an internally generated op.
753 * @ipr_cmd:	ipr command struct
755 * This function is the op done function for an internally generated,
756 * blocking op. It simply wakes the sleeping thread.
761 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
763 if (ipr_cmd->sibling)
764 ipr_cmd->sibling = NULL;
/* Wake the thread sleeping in ipr_send_blocking_cmd(). */
766 complete(&ipr_cmd->completion);
770 * ipr_send_blocking_cmd - Send command and sleep on its completion.
771 * @ipr_cmd:	ipr command struct
772 * @timeout_func:	function to invoke if command times out
778 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
779 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
782 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
784 init_completion(&ipr_cmd->completion);
785 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
/* Caller holds host_lock; drop it while sleeping so the interrupt
 * handler can complete the op, then reacquire before returning. */
787 spin_unlock_irq(ioa_cfg->host->host_lock);
788 wait_for_completion(&ipr_cmd->completion);
789 spin_lock_irq(ioa_cfg->host->host_lock);
793 * ipr_send_hcam - Send an HCAM to the adapter.
794 * @ioa_cfg:	ioa config struct
796 * @hostrcb:	hostrcb struct
798 * This function will send a Host Controlled Async command to the adapter.
799 * If HCAMs are currently not allowed to be issued to the adapter, it will
800 * place the hostrcb on the free queue.
805 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
806 struct ipr_hostrcb *hostrcb)
808 struct ipr_cmnd *ipr_cmd;
809 struct ipr_ioarcb *ioarcb;
811 if (ioa_cfg->allow_cmds) {
812 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
813 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
814 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
816 ipr_cmd->u.hostrcb = hostrcb;
817 ioarcb = &ipr_cmd->ioarcb;
/* Build the HCAM CDB: opcode, HCAM type, and the hostrcb buffer
 * length in big-endian split across CDB bytes 7 and 8. */
819 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
820 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
821 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
822 ioarcb->cmd_pkt.cdb[1] = type;
823 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
824 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
/* Single read IOADL entry covering the whole hostrcb DMA buffer. */
826 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
827 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
828 ipr_cmd->ioadl[0].flags_and_data_len =
829 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
830 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
/* Config-change notifications and error HCAMs complete through
 * different handlers. */
832 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
833 ipr_cmd->done = ipr_process_ccn;
835 ipr_cmd->done = ipr_process_error;
837 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
/* Hand the op to the adapter via the IOARRIN register. */
840 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
841 ioa_cfg->regs.ioarrin_reg);
/* HCAMs not allowed right now: park the hostrcb on the free queue. */
843 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
848 * ipr_init_res_entry - Initialize a resource entry struct.
849 * @res:	resource entry struct
854 static void ipr_init_res_entry(struct ipr_resource_entry *res)
/* Reset the per-resource state flags before the entry is (re)used. */
856 res->needs_sync_complete = 0;
859 res->del_from_ml = 0;
860 res->resetting_device = 0;
862 res->sata_port = NULL;
866 * ipr_handle_config_change - Handle a config change from the adapter
867 * @ioa_cfg:	ioa config struct
/* @hostrcb: the HCAM buffer holding the config-change notification */
873 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
874 struct ipr_hostrcb *hostrcb)
876 struct ipr_resource_entry *res = NULL;
877 struct ipr_config_table_entry *cfgte;
880 cfgte = &hostrcb->hcam.u.ccn.cfgte;
/* Look for an existing resource entry matching the reported address. */
882 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
883 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
884 sizeof(cfgte->res_addr))) {
/* New resource but no free entries: re-arm the HCAM and bail. */
891 if (list_empty(&ioa_cfg->free_res_q)) {
892 ipr_send_hcam(ioa_cfg,
893 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
/* Claim a free entry, initialize it, and move it to the used queue. */
898 res = list_entry(ioa_cfg->free_res_q.next,
899 struct ipr_resource_entry, queue);
901 list_del(&res->queue);
902 ipr_init_res_entry(res);
903 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
906 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
/* Removal notification: flag for mid-layer delete (if it has an sdev)
 * or return the entry to the free queue; addition of a resource with
 * no sdev schedules the worker to surface it to the mid-layer. */
908 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
910 res->del_from_ml = 1;
911 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
912 if (ioa_cfg->allow_ml_add_del)
913 schedule_work(&ioa_cfg->work_q);
915 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
916 } else if (!res->sdev) {
918 if (ioa_cfg->allow_ml_add_del)
919 schedule_work(&ioa_cfg->work_q);
/* Re-arm the config-change HCAM so further notifications arrive. */
922 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
926 * ipr_process_ccn - Op done function for a CCN.
927 * @ipr_cmd:	ipr command struct
929 * This function is the op done function for a configuration
930 * change notification host controlled async from the adapter.
935 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
937 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
938 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
939 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
941 list_del(&hostrcb->queue);
942 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* On failure: log unless the IOA was simply reset, then re-arm the
 * HCAM. On success: process the configuration change. */
945 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
946 dev_err(&ioa_cfg->pdev->dev,
947 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
949 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
951 ipr_handle_config_change(ioa_cfg, hostrcb);
956 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
957 * @i:		index into buffer
958 * @buf:		string to modify
960 * This function will strip all trailing whitespace, pad the end
961 * of the string with a single space, and NULL terminate the string.
964 * 	new length of string
966 static int strip_and_pad_whitespace(int i, char *buf)
/* Walk backwards from index i past trailing spaces (stops at index 0). */
968 while (i && buf[i] == ' ')
976 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
977 * @prefix:		string to print at start of printk
978 * @hostrcb:	hostrcb pointer
979 * @vpd:		vendor/product id/sn struct
984 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
/* Buffer sized for all three fields plus separators and terminator. */
987 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
/* Concatenate vendor, product, and serial, collapsing trailing blanks
 * between fields via strip_and_pad_whitespace(). */
990 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
991 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
993 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
994 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
996 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
997 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
999 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1003 * ipr_log_vpd - Log the passed VPD to the error log.
1004 * @vpd:		vendor/product id/sn struct
1009 static void ipr_log_vpd(struct ipr_vpd *vpd)
1011 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1012 + IPR_SERIAL_NUM_LEN];
/* Vendor and product IDs are fixed-width, not NUL-terminated in the
 * VPD struct; copy then terminate by hand before logging. */
1014 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1015 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1017 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1018 ipr_err("Vendor/Product ID: %s\n", buffer);
/* Same treatment for the serial number field. */
1020 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1021 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1022 ipr_err("    Serial Number: %s\n", buffer);
1026 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1027 * @prefix: string to print at start of printk
1028 * @hostrcb: hostrcb pointer
1029 * @vpd: vendor/product id/sn/wwn struct
/*
 * Compact form of extended-VPD logging: delegates the vendor/product/sn
 * line to ipr_log_vpd_compact(), then appends the 64-bit WWN (stored as
 * two big-endian 32-bit words) on its own line.
 */
1034 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1035 struct ipr_ext_vpd *vpd)
1037 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1038 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1039 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1043 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1044 * @vpd: vendor/product id/sn/wwn struct
/*
 * Log extended VPD: the plain VPD via ipr_log_vpd(), followed by the
 * WWN printed from its two big-endian 32-bit halves.
 */
1049 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1051 ipr_log_vpd(&vpd->vpd);
1052 ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1053 be32_to_cpu(vpd->wwid[1]));
1057 * ipr_log_enhanced_cache_error - Log a cache error.
1058 * @ioa_cfg: ioa config struct
1059 * @hostrcb: hostrcb struct
/*
 * Log an overlay-12 (enhanced) cache error: current vs expected
 * cache-directory/adapter pairing (extended VPD), then three raw
 * big-endian IOA data words.
 */
1064 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1065 struct ipr_hostrcb *hostrcb)
1067 struct ipr_hostrcb_type_12_error *error =
1068 &hostrcb->hcam.u.error.u.type_12_error;
1070 ipr_err("-----Current Configuration-----\n");
1071 ipr_err("Cache Directory Card Information:\n");
1072 ipr_log_ext_vpd(&error->ioa_vpd);
1073 ipr_err("Adapter Card Information:\n");
1074 ipr_log_ext_vpd(&error->cfc_vpd);
1076 ipr_err("-----Expected Configuration-----\n");
1077 ipr_err("Cache Directory Card Information:\n");
1078 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1079 ipr_err("Adapter Card Information:\n");
1080 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1082 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1083 be32_to_cpu(error->ioa_data[0]),
1084 be32_to_cpu(error->ioa_data[1]),
1085 be32_to_cpu(error->ioa_data[2]));
1089 * ipr_log_cache_error - Log a cache error.
1090 * @ioa_cfg: ioa config struct
1091 * @hostrcb: hostrcb struct
/*
 * Log an overlay-2 cache error.  Same layout as the enhanced variant
 * above, but the type-02 overlay carries plain (non-extended) VPD,
 * so ipr_log_vpd() is used instead of ipr_log_ext_vpd().
 */
1096 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1097 struct ipr_hostrcb *hostrcb)
1099 struct ipr_hostrcb_type_02_error *error =
1100 &hostrcb->hcam.u.error.u.type_02_error;
1102 ipr_err("-----Current Configuration-----\n");
1103 ipr_err("Cache Directory Card Information:\n");
1104 ipr_log_vpd(&error->ioa_vpd);
1105 ipr_err("Adapter Card Information:\n");
1106 ipr_log_vpd(&error->cfc_vpd);
1108 ipr_err("-----Expected Configuration-----\n");
1109 ipr_err("Cache Directory Card Information:\n");
1110 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1111 ipr_err("Adapter Card Information:\n");
1112 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1114 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1115 be32_to_cpu(error->ioa_data[0]),
1116 be32_to_cpu(error->ioa_data[1]),
1117 be32_to_cpu(error->ioa_data[2]));
1121 * ipr_log_enhanced_config_error - Log a configuration error.
1122 * @ioa_cfg: ioa config struct
1123 * @hostrcb: hostrcb struct
/*
 * Log an overlay-13 (enhanced) configuration error: one extended-VPD
 * record per logged device, including the new device and the
 * cache/adapter cards last seen with it.
 * NOTE(review): errors_logged comes straight from the adapter and is
 * used as the loop bound with no clamp against the dev[] array size
 * visible here -- confirm the overlay definition bounds it.
 */
1128 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1129 struct ipr_hostrcb *hostrcb)
1131 int errors_logged, i;
1132 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1133 struct ipr_hostrcb_type_13_error *error;
1135 error = &hostrcb->hcam.u.error.u.type_13_error;
1136 errors_logged = be32_to_cpu(error->errors_logged);
1138 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1139 be32_to_cpu(error->errors_detected), errors_logged);
1141 dev_entry = error->dev;
1143 for (i = 0; i < errors_logged; i++, dev_entry++) {
1146 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1147 ipr_log_ext_vpd(&dev_entry->vpd);
1149 ipr_err("-----New Device Information-----\n");
1150 ipr_log_ext_vpd(&dev_entry->new_vpd);
1152 ipr_err("Cache Directory Card Information:\n");
1153 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1155 ipr_err("Adapter Card Information:\n");
1156 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1161 * ipr_log_config_error - Log a configuration error.
1162 * @ioa_cfg: ioa config struct
1163 * @hostrcb: hostrcb struct
/*
 * Log an overlay-3 configuration error.  Plain-VPD analogue of the
 * enhanced variant; additionally dumps five raw big-endian IOA data
 * words per device entry.
 */
1168 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1169 struct ipr_hostrcb *hostrcb)
1171 int errors_logged, i;
1172 struct ipr_hostrcb_device_data_entry *dev_entry;
1173 struct ipr_hostrcb_type_03_error *error;
1175 error = &hostrcb->hcam.u.error.u.type_03_error;
1176 errors_logged = be32_to_cpu(error->errors_logged);
1178 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1179 be32_to_cpu(error->errors_detected), errors_logged);
1181 dev_entry = error->dev;
1183 for (i = 0; i < errors_logged; i++, dev_entry++) {
1186 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1187 ipr_log_vpd(&dev_entry->vpd);
1189 ipr_err("-----New Device Information-----\n");
1190 ipr_log_vpd(&dev_entry->new_vpd);
1192 ipr_err("Cache Directory Card Information:\n");
1193 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1195 ipr_err("Adapter Card Information:\n");
1196 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1198 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1199 be32_to_cpu(dev_entry->ioa_data[0]),
1200 be32_to_cpu(dev_entry->ioa_data[1]),
1201 be32_to_cpu(dev_entry->ioa_data[2]),
1202 be32_to_cpu(dev_entry->ioa_data[3]),
1203 be32_to_cpu(dev_entry->ioa_data[4]));
1208 * ipr_log_enhanced_array_error - Log an array configuration error.
1209 * @ioa_cfg: ioa config struct
1210 * @hostrcb: hostrcb struct
/*
 * Log an overlay-14/16 array configuration error: the RAID level and
 * last functional vset address, then one entry per array member.
 * Members whose serial number is all-'0' are treated as empty slots
 * and skipped.
 */
1215 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1216 struct ipr_hostrcb *hostrcb)
1219 struct ipr_hostrcb_type_14_error *error;
1220 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1221 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1223 error = &hostrcb->hcam.u.error.u.type_14_error;
1227 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1228 error->protection_level,
1229 ioa_cfg->host->host_no,
1230 error->last_func_vset_res_addr.bus,
1231 error->last_func_vset_res_addr.target,
1232 error->last_func_vset_res_addr.lun);
1236 array_entry = error->array_member;
/*
 * NOTE(review): the clamp uses sizeof(error->array_member), which is
 * the array size in BYTES, while num_entries counts entries -- this
 * looks like it should be ARRAY_SIZE(error->array_member).  As written
 * the bound is too large and the loop trusts the adapter-supplied
 * count.  Confirm against the struct definition before relying on it.
 */
1237 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1238 sizeof(error->array_member));
1240 for (i = 0; i < num_entries; i++, array_entry++) {
1241 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1244 if (be32_to_cpu(error->exposed_mode_adn) == i)
1245 ipr_err("Exposed Array Member %d:\n", i);
1247 ipr_err("Array Member %d:\n", i);
1249 ipr_log_ext_vpd(&array_entry->vpd);
1250 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1251 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1252 "Expected Location");
1259 * ipr_log_array_error - Log an array configuration error.
1260 * @ioa_cfg: ioa config struct
1261 * @hostrcb: hostrcb struct
/*
 * Log an overlay-4/6 array configuration error.  Plain-VPD analogue of
 * the enhanced variant; iterates a fixed 18 member slots, skipping
 * entries whose serial number is all-'0', and switches to the second
 * member array (array_member2) partway through (switch point not
 * visible in this excerpt).
 */
1266 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1267 struct ipr_hostrcb *hostrcb)
1270 struct ipr_hostrcb_type_04_error *error;
1271 struct ipr_hostrcb_array_data_entry *array_entry;
1272 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1274 error = &hostrcb->hcam.u.error.u.type_04_error;
1278 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1279 error->protection_level,
1280 ioa_cfg->host->host_no,
1281 error->last_func_vset_res_addr.bus,
1282 error->last_func_vset_res_addr.target,
1283 error->last_func_vset_res_addr.lun);
1287 array_entry = error->array_member;
1289 for (i = 0; i < 18; i++) {
1290 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1293 if (be32_to_cpu(error->exposed_mode_adn) == i)
1294 ipr_err("Exposed Array Member %d:\n", i);
1296 ipr_err("Array Member %d:\n", i);
1298 ipr_log_vpd(&array_entry->vpd);
1300 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1301 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1302 "Expected Location");
1307 array_entry = error->array_member2;
1314 * ipr_log_hex_data - Log additional hex IOA error data.
1315 * @ioa_cfg: ioa config struct
1316 * @data: IOA error data
/*
 * Hex-dump IOA error data, four big-endian 32-bit words per line,
 * prefixed with the byte offset.  len is in bytes; i indexes 32-bit
 * words and advances 4 words (one output line) per iteration.  At the
 * default log level the dump is capped at IPR_DEFAULT_MAX_ERROR_DUMP.
 */
1322 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1329 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1330 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1332 for (i = 0; i < len / 4; i += 4) {
1333 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1334 be32_to_cpu(data[i]),
1335 be32_to_cpu(data[i+1]),
1336 be32_to_cpu(data[i+2]),
1337 be32_to_cpu(data[i+3]));
1342 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1343 * @ioa_cfg: ioa config struct
1344 * @hostrcb: hostrcb struct
/*
 * Log an overlay-17 (enhanced) dual-adapter error: force-terminate and
 * strip the adapter-supplied failure string, log it with the PRC, the
 * remote IOA's extended VPD, then hex-dump whatever error data follows
 * the fixed part of the overlay (length derived from the HCAM length
 * minus the offsets of the variable data).
 */
1349 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1350 struct ipr_hostrcb *hostrcb)
1352 struct ipr_hostrcb_type_17_error *error;
1354 error = &hostrcb->hcam.u.error.u.type_17_error;
/* Defensive: the adapter string may not be NUL-terminated. */
1355 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1356 strstrip(error->failure_reason);
1358 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1359 be32_to_cpu(hostrcb->hcam.u.error.prc));
1360 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1361 ipr_log_hex_data(ioa_cfg, error->data,
1362 be32_to_cpu(hostrcb->hcam.length) -
1363 (offsetof(struct ipr_hostrcb_error, u) +
1364 offsetof(struct ipr_hostrcb_type_17_error, data)));
1368 * ipr_log_dual_ioa_error - Log a dual adapter error.
1369 * @ioa_cfg: ioa config struct
1370 * @hostrcb: hostrcb struct
/*
 * Log an overlay-7 dual-adapter error.  Identical structure to the
 * enhanced (overlay-17) variant but with the type-07 overlay and
 * plain compact VPD.
 */
1375 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1376 struct ipr_hostrcb *hostrcb)
1378 struct ipr_hostrcb_type_07_error *error;
1380 error = &hostrcb->hcam.u.error.u.type_07_error;
/* Defensive: the adapter string may not be NUL-terminated. */
1381 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1382 strstrip(error->failure_reason);
1384 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1385 be32_to_cpu(hostrcb->hcam.u.error.prc));
1386 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1387 ipr_log_hex_data(ioa_cfg, error->data,
1388 be32_to_cpu(hostrcb->hcam.length) -
1389 (offsetof(struct ipr_hostrcb_error, u) +
1390 offsetof(struct ipr_hostrcb_type_07_error, data)));
/*
 * Lookup tables mapping SAS fabric path active/state nibbles (from
 * fabric->path_state) to human-readable strings used by
 * ipr_log_fabric_path() below.
 */
1393 static const struct {
1396 } path_active_desc[] = {
1397 { IPR_PATH_NO_INFO, "Path" },
1398 { IPR_PATH_ACTIVE, "Active path" },
1399 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1402 static const struct {
1405 } path_state_desc[] = {
1406 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1407 { IPR_PATH_HEALTHY, "is healthy" },
1408 { IPR_PATH_DEGRADED, "is degraded" },
1409 { IPR_PATH_FAILED, "is failed" }
1413 * ipr_log_fabric_path - Log a fabric path error
1414 * @hostrcb: hostrcb struct
1415 * @fabric: fabric descriptor
/*
 * Log one fabric path descriptor.  Splits path_state into its active
 * and state fields, finds matching strings in the tables above, and
 * picks a message format based on which of cascaded_expander/phy are
 * valid (0xff means "not applicable").  The trailing ipr_err() is the
 * fallback when no table entry matched.
 */
1420 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1421 struct ipr_hostrcb_fabric_desc *fabric)
1424 u8 path_state = fabric->path_state;
1425 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1426 u8 state = path_state & IPR_PATH_STATE_MASK;
1428 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1429 if (path_active_desc[i].active != active)
1432 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1433 if (path_state_desc[j].state != state)
/* 0xff in a field means that level of the topology is absent. */
1436 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1437 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1438 path_active_desc[i].desc, path_state_desc[j].desc,
1440 } else if (fabric->cascaded_expander == 0xff) {
1441 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1442 path_active_desc[i].desc, path_state_desc[j].desc,
1443 fabric->ioa_port, fabric->phy);
1444 } else if (fabric->phy == 0xff) {
1445 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1446 path_active_desc[i].desc, path_state_desc[j].desc,
1447 fabric->ioa_port, fabric->cascaded_expander);
1449 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1450 path_active_desc[i].desc, path_state_desc[j].desc,
1451 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/* Fallback: raw dump when path_state matched no table entry. */
1457 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1458 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/*
 * Lookup tables for fabric path config elements: element type, element
 * status, and SAS link rate strings (indexed by cfg->link_rate masked
 * with IPR_PHY_LINK_RATE_MASK).  Used by ipr_log_path_elem() below.
 */
1461 static const struct {
1464 } path_type_desc[] = {
1465 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1466 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1467 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1468 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1471 static const struct {
1474 } path_status_desc[] = {
1475 { IPR_PATH_CFG_NO_PROB, "Functional" },
1476 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1477 { IPR_PATH_CFG_FAILED, "Failed" },
1478 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1479 { IPR_PATH_NOT_DETECTED, "Missing" },
1480 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1483 static const char *link_rate[] = {
1486 "phy reset problem",
1503 * ipr_log_path_elem - Log a fabric path element.
1504 * @hostrcb: hostrcb struct
1505 * @cfg: fabric path element struct
/*
 * Log one fabric path config element.  Splits type_status into type
 * and status, skips non-existent elements, matches both against the
 * tables above, then formats the message according to element type and
 * which of cascaded_expander/phy are valid (0xff = not applicable).
 * The final ipr_hcam_err() is the raw fallback for unmatched entries.
 */
1510 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1511 struct ipr_hostrcb_config_element *cfg)
1514 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1515 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1517 if (type == IPR_PATH_CFG_NOT_EXIST)
1520 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1521 if (path_type_desc[i].type != type)
1524 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1525 if (path_status_desc[j].status != status)
/* IOA ports always report phy + link rate + WWN. */
1528 if (type == IPR_PATH_CFG_IOA_PORT) {
1529 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1530 path_status_desc[j].desc, path_type_desc[i].desc,
1531 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1532 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1534 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1535 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1536 path_status_desc[j].desc, path_type_desc[i].desc,
1537 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1538 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1539 } else if (cfg->cascaded_expander == 0xff) {
1540 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1541 "WWN=%08X%08X\n", path_status_desc[j].desc,
1542 path_type_desc[i].desc, cfg->phy,
1543 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1544 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1545 } else if (cfg->phy == 0xff) {
1546 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1547 "WWN=%08X%08X\n", path_status_desc[j].desc,
1548 path_type_desc[i].desc, cfg->cascaded_expander,
1549 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1550 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1552 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1553 "WWN=%08X%08X\n", path_status_desc[j].desc,
1554 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1555 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1556 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Fallback: raw dump when type/status matched no table entry. */
1563 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1564 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1565 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1566 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1570 * ipr_log_fabric_error - Log a fabric error.
1571 * @ioa_cfg: ioa config struct
1572 * @hostrcb: hostrcb struct
/*
 * Log an overlay-20 fabric error: the failure string, then each fabric
 * descriptor (and its config elements) by walking the variable-length
 * descriptor list using each descriptor's self-reported length.
 * add_len tracks the bytes remaining; anything left after the
 * descriptors is hex-dumped raw.
 */
1577 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1578 struct ipr_hostrcb *hostrcb)
1580 struct ipr_hostrcb_type_20_error *error;
1581 struct ipr_hostrcb_fabric_desc *fabric;
1582 struct ipr_hostrcb_config_element *cfg;
1585 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Defensive: the adapter string may not be NUL-terminated. */
1586 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1587 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1589 add_len = be32_to_cpu(hostrcb->hcam.length) -
1590 (offsetof(struct ipr_hostrcb_error, u) +
1591 offsetof(struct ipr_hostrcb_type_20_error, desc));
1593 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1594 ipr_log_fabric_path(hostrcb, fabric);
1595 for_each_fabric_cfg(fabric, cfg)
1596 ipr_log_path_elem(hostrcb, cfg);
/* Advance by the descriptor's own length (variable-size entries). */
1598 add_len -= be16_to_cpu(fabric->length);
1599 fabric = (struct ipr_hostrcb_fabric_desc *)
1600 ((unsigned long)fabric + be16_to_cpu(fabric->length));
1603 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1607 * ipr_log_generic_error - Log an adapter error.
1608 * @ioa_cfg: ioa config struct
1609 * @hostrcb: hostrcb struct
/*
 * Default error logger: hex-dump the entire raw HCAM payload for
 * overlays with no dedicated decoder.
 */
1614 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1615 struct ipr_hostrcb *hostrcb)
1617 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1618 be32_to_cpu(hostrcb->hcam.length));
1622 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1625 * This function will return the index of into the ipr_error_table
1626 * for the specified IOASC. If the IOASC is not in the table,
1627 * 0 will be returned, which points to the entry used for unknown errors.
1630 * index into the ipr_error_table
/*
 * Linear search of ipr_error_table for the masked IOASC; per the
 * kernel-doc above, index 0 (the unknown-error entry) is returned
 * when no match is found.
 */
1632 static u32 ipr_get_error(u32 ioasc)
1636 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1637 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1644 * ipr_handle_log_data - Log an adapter error.
1645 * @ioa_cfg: ioa config struct
1646 * @hostrcb: hostrcb struct
1648 * This function logs an adapter error to the system.
/*
 * Decode and log one error-log HCAM.  Filters non-error notifications,
 * reports lost notifications, tells the SCSI midlayer about bus resets
 * (so Unit Attentions are handled), looks the IOASC up in the error
 * table, bumps the error counter, clamps the HCAM length to the raw
 * buffer size, then dispatches to the overlay-specific logger.
 */
1653 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1654 struct ipr_hostrcb *hostrcb)
1659 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1662 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1663 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1665 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1667 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1668 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1669 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1670 scsi_report_bus_reset(ioa_cfg->host,
1671 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1674 error_index = ipr_get_error(ioasc);
/* log_hcam == 0 means this IOASC is not worth logging. */
1676 if (!ipr_error_table[error_index].log_hcam)
1679 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1681 /* Set indication we have logged an error */
1682 ioa_cfg->errors_logged++;
1684 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp adapter-supplied length so the loggers never read past u.raw. */
1686 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1687 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1689 switch (hostrcb->hcam.overlay_id) {
1690 case IPR_HOST_RCB_OVERLAY_ID_2:
1691 ipr_log_cache_error(ioa_cfg, hostrcb);
1693 case IPR_HOST_RCB_OVERLAY_ID_3:
1694 ipr_log_config_error(ioa_cfg, hostrcb);
1696 case IPR_HOST_RCB_OVERLAY_ID_4:
1697 case IPR_HOST_RCB_OVERLAY_ID_6:
1698 ipr_log_array_error(ioa_cfg, hostrcb);
1700 case IPR_HOST_RCB_OVERLAY_ID_7:
1701 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1703 case IPR_HOST_RCB_OVERLAY_ID_12:
1704 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1706 case IPR_HOST_RCB_OVERLAY_ID_13:
1707 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1709 case IPR_HOST_RCB_OVERLAY_ID_14:
1710 case IPR_HOST_RCB_OVERLAY_ID_16:
1711 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1713 case IPR_HOST_RCB_OVERLAY_ID_17:
1714 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1716 case IPR_HOST_RCB_OVERLAY_ID_20:
1717 ipr_log_fabric_error(ioa_cfg, hostrcb);
1719 case IPR_HOST_RCB_OVERLAY_ID_1:
1720 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
/* Unknown/default overlays fall through to the raw hex dump. */
1722 ipr_log_generic_error(ioa_cfg, hostrcb);
1728 * ipr_process_error - Op done function for an adapter error log.
1729 * @ipr_cmd: ipr command struct
1731 * This function is the op done function for an error log host
1732 * controlled async from the adapter. It will log the error and
1733 * send the HCAM back to the adapter.
/*
 * Op-done handler for an error-log HCAM: remove the hostrcb from its
 * queue, return the command block to the free queue, log the data on
 * success (resetting the IOA if the failing-device IOASC demands it),
 * complain on unexpected failure, and re-arm by sending the HCAM back
 * to the adapter.
 */
1738 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1740 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1741 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1742 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1743 u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1745 list_del(&hostrcb->queue);
1746 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1749 ipr_handle_log_data(ioa_cfg, hostrcb);
1750 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1751 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/* IOA_WAS_RESET is expected during reset/reload; anything else is an error. */
1752 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1753 dev_err(&ioa_cfg->pdev->dev,
1754 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Hand the hostrcb back to the adapter for the next async error. */
1757 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1761 * ipr_timeout - An internally generated op has timed out.
1762 * @ipr_cmd: ipr command struct
1764 * This function blocks host requests and initiates an
/*
 * Timeout handler for internally generated ops: under the host lock,
 * count the error, request a dump if one was pending, and initiate an
 * IOA reset unless another reset already owns the adapter.
 */
1770 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1772 unsigned long lock_flags = 0;
1773 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1776 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1778 ioa_cfg->errors_logged++;
1779 dev_err(&ioa_cfg->pdev->dev,
1780 "Adapter being reset due to command timeout.\n");
1782 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1783 ioa_cfg->sdt_state = GET_DUMP;
/* Only reset if no reset is in progress, or this cmd drives the reset. */
1785 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1786 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1793 * ipr_oper_timeout - Adapter timed out transitioning to operational
1794 * @ipr_cmd: ipr command struct
1796 * This function blocks host requests and initiates an
/*
 * Timeout handler for the transition-to-operational phase.  Like
 * ipr_timeout(), but also burns the remaining reset-reload retries
 * so a wedged adapter is given up on rather than retried forever.
 */
1802 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1804 unsigned long lock_flags = 0;
1805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1808 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1810 ioa_cfg->errors_logged++;
1811 dev_err(&ioa_cfg->pdev->dev,
1812 "Adapter timed out transitioning to operational.\n");
1814 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1815 ioa_cfg->sdt_state = GET_DUMP;
1817 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* Exhaust retries so the next reset attempt is treated as final. */
1819 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1820 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1823 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1828 * ipr_reset_reload - Reset/Reload the IOA
1829 * @ioa_cfg: ioa config struct
1830 * @shutdown_type: shutdown type
1832 * This function resets the adapter and re-initializes it.
1833 * This function assumes that all new host commands have been stopped.
/*
 * Reset and re-initialize the adapter, sleeping until the reset
 * completes.  Must be entered with the host lock held; the lock is
 * dropped across the wait and re-taken before checking the outcome.
 * The ioa_is_dead check covers a reset that ultimately failed.
 */
1837 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1838 enum ipr_shutdown_type shutdown_type)
1840 if (!ioa_cfg->in_reset_reload)
1841 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type)
1843 spin_unlock_irq(ioa_cfg->host->host_lock);
1844 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1845 spin_lock_irq(ioa_cfg->host->host_lock);
1847 /* If we got hit with a host reset while we were already resetting
1848 the adapter for some reason, and the reset failed. */
1849 if (ioa_cfg->ioa_is_dead) {
1858 * ipr_find_ses_entry - Find matching SES in SES table
1859 * @res: resource entry struct of SES
1862 * pointer to SES table entry / NULL on failure
/*
 * Find the ipr_ses_table entry matching a SES resource's product id.
 * 'X' in compare_product_id_byte marks positions that must match the
 * table's product_id byte; a full IPR_PROD_ID_LEN match wins.
 * Returns NULL if no entry matches (per the kernel-doc above).
 */
1864 static const struct ipr_ses_table_entry *
1865 ipr_find_ses_entry(struct ipr_resource_entry *res)
1868 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1870 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1871 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1872 if (ste->compare_product_id_byte[j] == 'X') {
1873 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1881 if (matches == IPR_PROD_ID_LEN)
1889 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1890 * @ioa_cfg: ioa config struct
1892 * @bus_width: bus width
1895 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1896 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1897 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1898 * max 160MHz = max 320MB/sec).
/*
 * Compute the maximum SCSI transfer rate for a bus (units of 100KHz,
 * see kernel-doc above).  Starts from the theoretical maximum for the
 * bus width and lowers it for every SES on that bus that appears in
 * the SES limit table.
 */
1900 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1902 struct ipr_resource_entry *res;
1903 const struct ipr_ses_table_entry *ste;
1904 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1906 /* Loop through each config table entry in the config table buffer */
1907 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1908 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1911 if (bus != res->cfgte.res_addr.bus)
1914 if (!(ste = ipr_find_ses_entry(res)))
/* Table limit is in MB/s-style units; scale by bus width in bytes. */
1917 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1920 return max_xfer_rate;
1924 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1925 * @ioa_cfg: ioa config struct
1926 * @max_delay: max delay in micro-seconds to wait
1928 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1931 * 0 on success / other on failure
/*
 * Busy-wait (up to max_delay microseconds) for the IOA to raise the
 * IO Debug Acknowledge bit in the sense interrupt register.
 * Returns 0 on success, non-zero on timeout (per kernel-doc above).
 */
1933 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1935 volatile u32 pcii_reg;
1938 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1939 while (delay < max_delay) {
1940 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1942 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1945 /* udelay cannot be used if delay is more than a few milliseconds */
1946 if ((delay / 1000) > MAX_UDELAY_MS)
1947 mdelay(delay / 1000);
1957 * ipr_get_ldump_data_section - Dump IOA memory
1958 * @ioa_cfg: ioa config struct
1959 * @start_addr: adapter address to dump
1960 * @dest: destination kernel buffer
1961 * @length_in_words: length to dump in 4 byte words
1964 * 0 on success / -EIO on failure
/*
 * Dump a section of IOA memory via the LDUMP mailbox handshake:
 * enter LDUMP state, write the start address, then read one 32-bit
 * word per IO-debug-ack cycle, acking each word except the last,
 * and finally walk the IOA back out of LDUMP state.  Returns 0 on
 * success / -EIO on failure (per kernel-doc above).
 */
1966 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1968 __be32 *dest, u32 length_in_words)
1970 volatile u32 temp_pcii_reg;
1973 /* Write IOA interrupt reg starting LDUMP state */
1974 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1975 ioa_cfg->regs.set_uproc_interrupt_reg);
1977 /* Wait for IO debug acknowledge */
1978 if (ipr_wait_iodbg_ack(ioa_cfg,
1979 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1980 dev_err(&ioa_cfg->pdev->dev,
1981 "IOA dump long data transfer timeout\n");
1985 /* Signal LDUMP interlocked - clear IO debug ack */
1986 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1987 ioa_cfg->regs.clr_interrupt_reg);
1989 /* Write Mailbox with starting address */
1990 writel(start_addr, ioa_cfg->ioa_mailbox);
1992 /* Signal address valid - clear IOA Reset alert */
1993 writel(IPR_UPROCI_RESET_ALERT,
1994 ioa_cfg->regs.clr_uproc_interrupt_reg);
1996 for (i = 0; i < length_in_words; i++) {
1997 /* Wait for IO debug acknowledge */
1998 if (ipr_wait_iodbg_ack(ioa_cfg,
1999 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2000 dev_err(&ioa_cfg->pdev->dev,
2001 "IOA dump short data transfer timeout\n");
2005 /* Read data from mailbox and increment destination pointer */
2006 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2009 /* For all but the last word of data, signal data received */
2010 if (i < (length_in_words - 1)) {
2011 /* Signal dump data received - Clear IO debug Ack */
2012 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2013 ioa_cfg->regs.clr_interrupt_reg);
2017 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2018 writel(IPR_UPROCI_RESET_ALERT,
2019 ioa_cfg->regs.set_uproc_interrupt_reg);
2021 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2022 ioa_cfg->regs.clr_uproc_interrupt_reg);
2024 /* Signal dump data received - Clear IO debug Ack */
2025 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2026 ioa_cfg->regs.clr_interrupt_reg);
2028 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2029 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2031 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2033 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2043 #ifdef CONFIG_SCSI_IPR_DUMP
2045 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2046 * @ioa_cfg: ioa config struct
2047 * @pci_address: adapter address
2048 * @length: length of data to copy
2050 * Copy data from PCI adapter to kernel buffer.
2051 * Note: length MUST be a 4 byte multiple
2053 * 0 on success / other on failure
/*
 * Copy up to 'length' bytes (must be a 4-byte multiple) of adapter
 * memory into the IOA dump, allocating GFP_ATOMIC pages on demand and
 * tracking the fill position via page_offset/next_page_index.  The
 * host lock is held around each ipr_get_ldump_data_section() call and
 * the copy aborts early if sdt_state flips to ABORT_DUMP or the total
 * dump would exceed IPR_MAX_IOA_DUMP_SIZE.  Returns bytes copied.
 */
2055 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2056 unsigned long pci_address, u32 length)
2058 int bytes_copied = 0;
2059 int cur_len, rc, rem_len, rem_page_len;
2061 unsigned long lock_flags = 0;
2062 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2064 while (bytes_copied < length &&
2065 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
/* Need a fresh page when the current one is full (or none yet). */
2066 if (ioa_dump->page_offset >= PAGE_SIZE ||
2067 ioa_dump->page_offset == 0) {
2068 page = (__be32 *)__get_free_page(GFP_ATOMIC);
/* On allocation failure, return what was copied so far. */
2072 return bytes_copied;
2075 ioa_dump->page_offset = 0;
2076 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2077 ioa_dump->next_page_index++;
2079 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2081 rem_len = length - bytes_copied;
2082 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2083 cur_len = min(rem_len, rem_page_len);
2085 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2086 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2089 rc = ipr_get_ldump_data_section(ioa_cfg,
2090 pci_address + bytes_copied,
2091 &page[ioa_dump->page_offset / 4],
2092 (cur_len / sizeof(u32)));
2094 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2097 ioa_dump->page_offset += cur_len;
2098 bytes_copied += cur_len;
2106 return bytes_copied;
2110 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2111 * @hdr: dump entry header struct
/*
 * Initialize common fields of a dump entry header: eye catcher,
 * data offset (data follows the header immediately), and a default
 * success status.
 */
2116 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2118 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2120 hdr->offset = sizeof(*hdr);
2121 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2125 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2126 * @ioa_cfg: ioa config struct
2127 * @driver_dump: driver dump struct
/*
 * Fill the adapter-type dump entry: adapter type plus a firmware
 * version packed from the page-3 inquiry data as
 * major | card_type | minor[0] | minor[1] (one byte each, MSB first).
 * Increments the overall dump entry count.
 */
2132 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2133 struct ipr_driver_dump *driver_dump)
2135 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2137 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2138 driver_dump->ioa_type_entry.hdr.len =
2139 sizeof(struct ipr_dump_ioa_type_entry) -
2140 sizeof(struct ipr_dump_entry_header);
2141 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2142 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2143 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2144 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2145 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2146 ucode_vpd->minor_release[1];
2147 driver_dump->hdr.num_entries++;
2151 * ipr_dump_version_data - Fill in the driver version in the dump.
2152 * @ioa_cfg: ioa config struct
2153 * @driver_dump: driver dump struct
/*
 * Fill the driver-version dump entry (ASCII IPR_DRIVER_VERSION string)
 * and increment the overall dump entry count.
 */
2158 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2159 struct ipr_driver_dump *driver_dump)
2161 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2162 driver_dump->version_entry.hdr.len =
2163 sizeof(struct ipr_dump_version_entry) -
2164 sizeof(struct ipr_dump_entry_header);
2165 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2166 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2167 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2168 driver_dump->hdr.num_entries++;
2172 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2173 * @ioa_cfg: ioa config struct
2174 * @driver_dump: driver dump struct
/*
 * Fill the IOA-trace dump entry by copying the driver's in-memory
 * trace buffer (IPR_TRACE_SIZE bytes) and increment the overall dump
 * entry count.
 */
2179 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2180 struct ipr_driver_dump *driver_dump)
2182 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2183 driver_dump->trace_entry.hdr.len =
2184 sizeof(struct ipr_dump_trace_entry) -
2185 sizeof(struct ipr_dump_entry_header);
2186 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2187 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2188 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2189 driver_dump->hdr.num_entries++;
2193 * ipr_dump_location_data - Fill in the IOA location in the dump.
2194 * @ioa_cfg: ioa config struct
2195 * @driver_dump: driver dump struct
/*
 * Fill the IOA-location dump entry with the PCI device's bus id
 * string and increment the overall dump entry count.
 * NOTE(review): dev.bus_id was removed from modern kernels in favor
 * of dev_name(&pdev->dev) -- confirm against the target kernel tree.
 */
2200 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2201 struct ipr_driver_dump *driver_dump)
2203 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2204 driver_dump->location_entry.hdr.len =
2205 sizeof(struct ipr_dump_location_entry) -
2206 sizeof(struct ipr_dump_entry_header);
2207 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2208 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2209 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2210 driver_dump->hdr.num_entries++;
2214 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2215 * @ioa_cfg: ioa config struct
2216 * @dump: dump struct
/*
 * Builds the complete dump image: driver-side entries (version, location,
 * IOA type, trace) followed by adapter memory gathered via the IOA's Smart
 * Dump Table (SDT).  Only valid when sdt_state == GET_DUMP.
 * NOTE(review): excerpt is truncated (braces, some early-return lines and
 * loop variable declarations are missing); comments only, code untouched.
 */
2221 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2223 unsigned long start_addr, sdt_word;
2224 unsigned long lock_flags = 0;
2225 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2226 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2227 u32 num_entries, start_off, end_off;
2228 u32 bytes_to_copy, bytes_copied, rc;
2229 struct ipr_sdt *sdt;
2234 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out unless a dump was actually requested */
2236 if (ioa_cfg->sdt_state != GET_DUMP) {
2237 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox register holds the SDT start address written by the IOA */
2241 start_addr = readl(ioa_cfg->ioa_mailbox);
2243 if (!ipr_sdt_is_fmt2(start_addr)) {
2244 dev_err(&ioa_cfg->pdev->dev,
2245 "Invalid dump table format: %lx\n", start_addr);
2246 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2250 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2252 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2254 /* Initialize the overall dump header */
2255 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2256 driver_dump->hdr.num_entries = 1;
2257 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2258 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2259 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2260 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Driver-side entries; each helper increments hdr.num_entries */
2262 ipr_dump_version_data(ioa_cfg, driver_dump);
2263 ipr_dump_location_data(ioa_cfg, driver_dump);
2264 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2265 ipr_dump_trace_data(ioa_cfg, driver_dump);
2267 /* Update dump_header */
2268 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2270 /* IOA Dump entry */
2271 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2272 ioa_dump->format = IPR_SDT_FMT2;
2273 ioa_dump->hdr.len = 0;
2274 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2275 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2277 /* First entries in sdt are actually a list of dump addresses and
2278 lengths to gather the real dump data. sdt represents the pointer
2279 to the ioa generated dump table. Dump data will be extracted based
2280 on entries in this table */
2281 sdt = &ioa_dump->sdt;
2283 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2284 sizeof(struct ipr_sdt) / sizeof(__be32));
2286 /* Smart Dump table is ready to use and the first entry is valid */
2287 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2288 dev_err(&ioa_cfg->pdev->dev,
2289 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2290 rc, be32_to_cpu(sdt->hdr.state));
2291 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2292 ioa_cfg->sdt_state = DUMP_OBTAINED;
2293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2297 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Clamp to what the local table can actually describe */
2299 if (num_entries > IPR_NUM_SDT_ENTRIES)
2300 num_entries = IPR_NUM_SDT_ENTRIES;
/* Lock dropped for the long copy phase below */
2302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2304 for (i = 0; i < num_entries; i++) {
/* Stop collecting once the dump has reached its size cap */
2305 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2306 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2310 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2311 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2312 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2313 end_off = be32_to_cpu(sdt->entry[i].end_offset);
2315 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2316 bytes_to_copy = end_off - start_off;
/* Oversized entries are invalidated rather than partially copied */
2317 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2318 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2322 /* Copy data from adapter to driver buffers */
2323 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2326 ioa_dump->hdr.len += bytes_copied;
/* A short copy downgrades the dump to qualified success */
2328 if (bytes_copied != bytes_to_copy) {
2329 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2336 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2338 /* Update dump_header */
2339 driver_dump->hdr.len += ioa_dump->hdr.len;
2341 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* No-op stub; presumably the #else branch when adapter dump support is
 * compiled out (CONFIG_SCSI_IPR_DUMP unset) -- guard not visible here. */
2346 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2350 * ipr_release_dump - Free adapter dump memory
2351 * @kref: kref struct
/*
 * kref release callback: detaches the dump from the ioa_cfg under the host
 * lock, then frees each dump data page.  Runs when the last reference to
 * the dump is dropped via kref_put().
 */
2356 static void ipr_release_dump(struct kref *kref)
2358 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2359 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2360 unsigned long lock_flags = 0;
/* Clear the ioa_cfg's view of the dump before freeing its pages */
2364 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2365 ioa_cfg->dump = NULL;
2366 ioa_cfg->sdt_state = INACTIVE;
2367 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2369 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2370 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2377 * ipr_worker_thread - Worker thread
2378 * @work: ioa config struct
2380 * Called at task level from a work thread. This function takes care
2381 * of adding and removing device from the mid-layer as configuration
2382 * changes are detected by the adapter.
/*
 * Also drives dump collection: if a dump was requested (GET_DUMP), it is
 * taken here outside interrupt context, then an adapter reset is initiated.
 * The host lock is repeatedly dropped around mid-layer calls
 * (scsi_remove_device / scsi_add_device) which may sleep.
 * NOTE(review): excerpt is truncated; comments only, code untouched.
 */
2387 static void ipr_worker_thread(struct work_struct *work)
2389 unsigned long lock_flags;
2390 struct ipr_resource_entry *res;
2391 struct scsi_device *sdev;
2392 struct ipr_dump *dump;
2393 struct ipr_ioa_cfg *ioa_cfg =
2394 container_of(work, struct ipr_ioa_cfg, work_q);
2395 u8 bus, target, lun;
2399 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2401 if (ioa_cfg->sdt_state == GET_DUMP) {
2402 dump = ioa_cfg->dump;
2404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference so the dump can't be freed while we collect it */
2407 kref_get(&dump->kref);
2408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2409 ipr_get_ioa_dump(ioa_cfg, dump);
2410 kref_put(&dump->kref, ipr_release_dump);
2412 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2413 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2414 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Defer config changes until the adapter accepts commands again */
2422 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pass 1: remove devices the adapter reported as gone */
2427 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2428 if (res->del_from_ml && res->sdev) {
2431 if (!scsi_device_get(sdev)) {
2432 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Drop the lock: scsi_remove_device() may sleep */
2433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2434 scsi_remove_device(sdev);
2435 scsi_device_put(sdev);
2436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Pass 2: register newly-discovered devices with the mid-layer */
2443 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2444 if (res->add_to_ml) {
2445 bus = res->cfgte.res_addr.bus;
2446 target = res->cfgte.res_addr.target;
2447 lun = res->cfgte.res_addr.lun;
2449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2450 scsi_add_device(ioa_cfg->host, bus, target, lun);
2451 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2456 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Notify userspace (udev) that host configuration changed */
2457 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2461 #ifdef CONFIG_SCSI_IPR_TRACE
2463 * ipr_read_trace - Dump the adapter trace
2464 * @kobj: kobject struct
2467 * @count: buffer size
2470 * number of bytes printed to buffer
/*
 * sysfs binary-attribute read handler: copies a window of the in-memory
 * trace buffer into the user's buffer, holding the host lock so the trace
 * is not modified mid-copy.
 */
2472 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2473 loff_t off, size_t count)
2475 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2476 struct Scsi_Host *shost = class_to_shost(cdev);
2477 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2478 unsigned long lock_flags = 0;
2479 int size = IPR_TRACE_SIZE;
2480 char *src = (char *)ioa_cfg->trace;
/* Clamp reads that would run past the end of the trace buffer */
2484 if (off + count > size) {
2489 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2490 memcpy(buf, &src[off], count);
2491 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute "trace" (read-only) backed by the handler above */
2495 static struct bin_attribute ipr_trace_attr = {
2501 .read = ipr_read_trace,
/* Mapping between write-cache states and the strings shown/accepted by the
 * "write_cache" sysfs attribute below. */
2505 static const struct {
2506 enum ipr_cache_state state;
2508 } cache_state [] = {
2509 { CACHE_NONE, "none" },
2510 { CACHE_DISABLED, "disabled" },
2511 { CACHE_ENABLED, "enabled" }
2515 * ipr_show_write_caching - Show the write caching attribute
2516 * @class_dev: class device struct
2520 * number of bytes printed to buffer
/* Reports the current cache state using the cache_state[] name table. */
2522 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2524 struct Scsi_Host *shost = class_to_shost(class_dev);
2525 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2526 unsigned long lock_flags = 0;
2529 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2530 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2531 if (cache_state[i].state == ioa_cfg->cache_state) {
2532 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2536 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2542 * ipr_store_write_caching - Enable/disable adapter write cache
2543 * @class_dev: class_device struct
2545 * @count: buffer size
2547 * This function will enable/disable adapter write cache.
2550 * count on success / other on failure
/*
 * Parses "enabled"/"disabled", then triggers a normal-shutdown adapter
 * reset so the new cache setting takes effect, and waits for the reset
 * to finish.  Requires CAP_SYS_ADMIN.
 */
2552 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2553 const char *buf, size_t count)
2555 struct Scsi_Host *shost = class_to_shost(class_dev);
2556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2557 unsigned long lock_flags = 0;
2558 enum ipr_cache_state new_state = CACHE_INVALID;
2561 if (!capable(CAP_SYS_ADMIN))
/* Adapters without a write cache cannot change state */
2563 if (ioa_cfg->cache_state == CACHE_NONE)
2566 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2567 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2568 new_state = cache_state[i].state;
/* Only the two settable states are accepted; "none" is rejected */
2573 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2576 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Already in the requested state: nothing to do */
2577 if (ioa_cfg->cache_state == new_state) {
2578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2582 ioa_cfg->cache_state = new_state;
2583 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2584 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2585 if (!ioa_cfg->in_reset_reload)
2586 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2587 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2588 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute "write_cache": rw for owner, read for all */
2593 static struct class_device_attribute ipr_ioa_cache_attr = {
2595 .name = "write_cache",
2596 .mode = S_IRUGO | S_IWUSR,
2598 .show = ipr_show_write_caching,
2599 .store = ipr_store_write_caching
2603 * ipr_show_fw_version - Show the firmware version
2604 * @class_dev: class device struct
2608 * number of bytes printed to buffer
/*
 * Formats the microcode level from the cached inquiry page 3 VPD as an
 * 8-hex-digit string: major, card type, minor[0], minor[1].
 */
2610 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2612 struct Scsi_Host *shost = class_to_shost(class_dev);
2613 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2614 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2615 unsigned long lock_flags = 0;
2618 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2619 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2620 ucode_vpd->major_release, ucode_vpd->card_type,
2621 ucode_vpd->minor_release[0],
2622 ucode_vpd->minor_release[1]);
2623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only sysfs attribute "fw_version" */
2627 static struct class_device_attribute ipr_fw_version_attr = {
2629 .name = "fw_version",
2632 .show = ipr_show_fw_version,
2636 * ipr_show_log_level - Show the adapter's error logging level
2637 * @class_dev: class device struct
2641 * number of bytes printed to buffer
2643 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2645 struct Scsi_Host *shost = class_to_shost(class_dev);
2646 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2647 unsigned long lock_flags = 0;
2650 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2651 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2652 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2657 * ipr_store_log_level - Change the adapter's error logging level
2658 * @class_dev: class device struct
2662 * number of bytes printed to buffer
/* Parses the buffer as a base-10 integer; no range validation is done. */
2664 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2665 const char *buf, size_t count)
2667 struct Scsi_Host *shost = class_to_shost(class_dev);
2668 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2669 unsigned long lock_flags = 0;
2671 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2672 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute "log_level": rw for owner, read for all */
2677 static struct class_device_attribute ipr_log_level_attr = {
2679 .name = "log_level",
2680 .mode = S_IRUGO | S_IWUSR,
2682 .show = ipr_show_log_level,
2683 .store = ipr_store_log_level
2687 * ipr_store_diagnostics - IOA Diagnostics interface
2688 * @class_dev: class_device struct
2690 * @count: buffer size
2692 * This function will reset the adapter and wait a reasonable
2693 * amount of time for any errors that the adapter might log.
2696 * count on success / other on failure
/*
 * Requires CAP_SYS_ADMIN.  Clears errors_logged, forces a normal-shutdown
 * reset, waits for the reset (plus a settling period) and then reports
 * failure if the adapter logged any errors or is resetting again.
 */
2698 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2699 const char *buf, size_t count)
2701 struct Scsi_Host *shost = class_to_shost(class_dev);
2702 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2703 unsigned long lock_flags = 0;
2706 if (!capable(CAP_SYS_ADMIN))
/* Wait out any reset already in progress before starting our own */
2709 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2710 while(ioa_cfg->in_reset_reload) {
2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2713 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2716 ioa_cfg->errors_logged = 0;
2717 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2719 if (ioa_cfg->in_reset_reload) {
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2723 /* Wait for a second for any errors to be logged */
2726 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Any logged error or a renewed reset means diagnostics failed */
2730 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2731 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Write-only sysfs trigger "run_diagnostics" */
2738 static struct class_device_attribute ipr_diagnostics_attr = {
2740 .name = "run_diagnostics",
2743 .store = ipr_store_diagnostics
2747 * ipr_show_adapter_state - Show the adapter's state
2748 * @class_dev: class device struct
2752 * number of bytes printed to buffer
/* Reports "offline" when ioa_is_dead is set, otherwise "online". */
2754 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2756 struct Scsi_Host *shost = class_to_shost(class_dev);
2757 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2758 unsigned long lock_flags = 0;
2761 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2762 if (ioa_cfg->ioa_is_dead)
2763 len = snprintf(buf, PAGE_SIZE, "offline\n");
2765 len = snprintf(buf, PAGE_SIZE, "online\n");
2766 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2771 * ipr_store_adapter_state - Change adapter state
2772 * @class_dev: class_device struct
2774 * @count: buffer size
2776 * This function will change the adapter's state.
2779 * count on success / other on failure
/*
 * Writing "online" to a dead adapter clears its failure bookkeeping and
 * kicks off a reset to bring it back.  Requires CAP_SYS_ADMIN.
 */
2781 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2782 const char *buf, size_t count)
2784 struct Scsi_Host *shost = class_to_shost(class_dev);
2785 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2786 unsigned long lock_flags;
2789 if (!capable(CAP_SYS_ADMIN))
2792 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2793 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2794 ioa_cfg->ioa_is_dead = 0;
2795 ioa_cfg->reset_retries = 0;
2796 ioa_cfg->in_ioa_bringdown = 0;
2797 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Block until the (possible) bring-up reset completes */
2800 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute exposing adapter online/offline state */
2805 static struct class_device_attribute ipr_ioa_state_attr = {
2808 .mode = S_IRUGO | S_IWUSR,
2810 .show = ipr_show_adapter_state,
2811 .store = ipr_store_adapter_state
2815 * ipr_store_reset_adapter - Reset the adapter
2816 * @class_dev: class_device struct
2818 * @count: buffer size
2820 * This function will reset the adapter.
2823 * count on success / other on failure
/*
 * Requires CAP_SYS_ADMIN.  Starts a normal-shutdown reset unless one is
 * already running, then waits for reset/reload to finish.
 */
2825 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2826 const char *buf, size_t count)
2828 struct Scsi_Host *shost = class_to_shost(class_dev);
2829 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2830 unsigned long lock_flags;
2833 if (!capable(CAP_SYS_ADMIN))
2836 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2837 if (!ioa_cfg->in_reset_reload)
2838 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2840 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Write-only sysfs trigger "reset_host" */
2845 static struct class_device_attribute ipr_ioa_reset_attr = {
2847 .name = "reset_host",
2850 .store = ipr_store_reset_adapter
2854 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2855 * @buf_len: buffer length
2857 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2858 * list to use for microcode download
2861 * pointer to sglist / NULL on failure
/*
 * Sizes elements so the whole image fits in at most IPR_MAX_SGLIST entries,
 * rounding each element up to a power-of-two number of pages.
 * NOTE(review): excerpt is truncated (some cleanup/return paths missing);
 * comments only, code untouched.
 */
2863 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2865 int sg_size, order, bsize_elem, num_elem, i, j;
2866 struct ipr_sglist *sglist;
2867 struct scatterlist *scatterlist;
2870 /* Get the minimum size per scatter/gather element */
2871 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2873 /* Get the actual size per element */
2874 order = get_order(sg_size);
2876 /* Determine the actual number of bytes per element */
2877 bsize_elem = PAGE_SIZE * (1 << order);
2879 /* Determine the actual number of sg entries needed */
2880 if (buf_len % bsize_elem)
2881 num_elem = (buf_len / bsize_elem) + 1;
2883 num_elem = buf_len / bsize_elem;
2885 /* Allocate a scatter/gather list for the DMA */
/* The struct already embeds one scatterlist entry, hence num_elem - 1 */
2886 sglist = kzalloc(sizeof(struct ipr_sglist) +
2887 (sizeof(struct scatterlist) * (num_elem - 1)),
2890 if (sglist == NULL) {
2895 scatterlist = sglist->scatterlist;
2897 sglist->order = order;
2898 sglist->num_sg = num_elem;
2900 /* Allocate a bunch of sg elements */
2901 for (i = 0; i < num_elem; i++) {
2902 page = alloc_pages(GFP_KERNEL, order);
2906 /* Free up what we already allocated */
2907 for (j = i - 1; j >= 0; j--)
2908 __free_pages(scatterlist[j].page, order);
2913 scatterlist[i].page = page;
2920 * ipr_free_ucode_buffer - Frees a microcode download buffer
2921 * @p_dnld: scatter/gather list pointer
2923 * Free a DMA'able ucode download buffer previously allocated with
2924 * ipr_alloc_ucode_buffer
/* Releases each sg element's pages at the order recorded at allocation. */
2929 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2933 for (i = 0; i < sglist->num_sg; i++)
2934 __free_pages(sglist->scatterlist[i].page, sglist->order);
2940 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2941 * @sglist: scatter/gather list pointer
2942 * @buffer: buffer pointer
2943 * @len: buffer length
2945 * Copy a microcode image from a user buffer into a buffer allocated by
2946 * ipr_alloc_ucode_buffer
2949 * 0 on success / other on failure
/*
 * Copies full elements first, then the trailing partial element, mapping
 * each page with kmap()/kunmap() and recording per-entry lengths.
 */
2951 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2952 u8 *buffer, u32 len)
2954 int bsize_elem, i, result = 0;
2955 struct scatterlist *scatterlist;
2958 /* Determine the actual number of bytes per element */
2959 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2961 scatterlist = sglist->scatterlist;
/* Whole elements: advance the source pointer one element per iteration */
2963 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2964 kaddr = kmap(scatterlist[i].page);
2965 memcpy(kaddr, buffer, bsize_elem);
2966 kunmap(scatterlist[i].page);
2968 scatterlist[i].length = bsize_elem;
/* Remainder that did not fill a complete element */
2976 if (len % bsize_elem) {
2977 kaddr = kmap(scatterlist[i].page);
2978 memcpy(kaddr, buffer, len % bsize_elem);
2979 kunmap(scatterlist[i].page);
2981 scatterlist[i].length = len % bsize_elem;
2984 sglist->buffer_len = len;
2989 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2990 * @ipr_cmd: ipr command struct
2991 * @sglist: scatter/gather list
2993 * Builds a microcode download IOA data list (IOADL).
/*
 * Translates the DMA-mapped scatter/gather list into the IOA's descriptor
 * format for a write (host-to-adapter) transfer.
 */
2996 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2997 struct ipr_sglist *sglist)
2999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3000 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3001 struct scatterlist *scatterlist = sglist->scatterlist;
3004 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3005 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3006 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
3007 ioarcb->write_ioadl_len =
3008 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* One descriptor per mapped sg entry; lengths/addresses are big-endian */
3010 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3011 ioadl[i].flags_and_data_len =
3012 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3014 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor so the IOA knows where the list ends */
3017 ioadl[i-1].flags_and_data_len |=
3018 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3022 * ipr_update_ioa_ucode - Update IOA's microcode
3023 * @ioa_cfg: ioa config struct
3024 * @sglist: scatter/gather list
3026 * Initiate an adapter reset to update the IOA's microcode
3029 * 0 on success / -EIO on failure
/*
 * DMA-maps the prepared ucode buffer, publishes it via ioa_cfg->ucode_sglist
 * (consumed during the reset sequence), resets the adapter, waits for the
 * reset to finish and finally unpublishes the buffer.
 */
3031 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3032 struct ipr_sglist *sglist)
3034 unsigned long lock_flags;
/* Let any in-flight reset complete before starting a download */
3036 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3037 while(ioa_cfg->in_reset_reload) {
3038 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3039 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3040 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be outstanding at a time */
3043 if (ioa_cfg->ucode_sglist) {
3044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3045 dev_err(&ioa_cfg->pdev->dev,
3046 "Microcode download already in progress\n");
3050 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3051 sglist->num_sg, DMA_TO_DEVICE);
3053 if (!sglist->num_dma_sg) {
3054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3055 dev_err(&ioa_cfg->pdev->dev,
3056 "Failed to map microcode download buffer!\n");
3060 ioa_cfg->ucode_sglist = sglist;
3061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3066 ioa_cfg->ucode_sglist = NULL;
3067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 * ipr_store_update_fw - Update the firmware on the adapter
3073 * @class_dev: class_device struct
3075 * @count: buffer size
3077 * This function will update the firmware on the adapter.
3080 * count on success / other on failure
/*
 * sysfs store handler: treats the written string as a firmware file name,
 * loads it via request_firmware(), validates the image header against the
 * adapter's card type, stages the payload into a DMA sg buffer and runs
 * the download via ipr_update_ioa_ucode().  Requires CAP_SYS_ADMIN.
 */
3082 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3083 const char *buf, size_t count)
3085 struct Scsi_Host *shost = class_to_shost(class_dev);
3086 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3087 struct ipr_ucode_image_header *image_hdr;
3088 const struct firmware *fw_entry;
3089 struct ipr_sglist *sglist;
3092 int len, result, dnld_size;
3094 if (!capable(CAP_SYS_ADMIN))
/* Strip the trailing newline sysfs appends to the written name */
3097 len = snprintf(fname, 99, "%s", buf);
3098 fname[len-1] = '\0';
3100 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3101 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3105 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
/* Reject images whose header overruns the file or whose card type does
 * not match this adapter (a zero VPD card_type skips the type check) */
3107 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3108 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3109 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3110 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3111 release_firmware(fw_entry);
/* Payload follows the variable-length image header */
3115 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3116 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3117 sglist = ipr_alloc_ucode_buffer(dnld_size);
3120 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3121 release_firmware(fw_entry);
3125 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3128 dev_err(&ioa_cfg->pdev->dev,
3129 "Microcode buffer copy to DMA buffer failed\n");
3133 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3138 ipr_free_ucode_buffer(sglist);
3139 release_firmware(fw_entry);
/* Write-only sysfs trigger "update_fw" */
3143 static struct class_device_attribute ipr_update_fw_attr = {
3145 .name = "update_fw",
3148 .store = ipr_store_update_fw
/* Per-host sysfs attributes registered via the scsi_host_template. */
3151 static struct class_device_attribute *ipr_ioa_attrs[] = {
3152 &ipr_fw_version_attr,
3153 &ipr_log_level_attr,
3154 &ipr_diagnostics_attr,
3155 &ipr_ioa_state_attr,
3156 &ipr_ioa_reset_attr,
3157 &ipr_update_fw_attr,
3158 &ipr_ioa_cache_attr,
3162 #ifdef CONFIG_SCSI_IPR_DUMP
3164 * ipr_read_dump - Dump the adapter
3165 * @kobj: kobject struct
3168 * @count: buffer size
3171 * number of bytes printed to buffer
/*
 * sysfs binary read handler for the dump image.  The logical image is laid
 * out as: driver_dump struct, then the fixed head of ioa_dump (up to its
 * ioa_data member), then the page array of collected adapter data.  The
 * requested [off, off+count) window is served piecewise across those three
 * regions.  Requires CAP_SYS_ADMIN and a completed dump (DUMP_OBTAINED).
 * NOTE(review): excerpt is truncated (loop around the page copy and some
 * count/off bookkeeping lines missing); comments only, code untouched.
 */
3173 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3174 loff_t off, size_t count)
3176 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3177 struct Scsi_Host *shost = class_to_shost(cdev);
3178 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3179 struct ipr_dump *dump;
3180 unsigned long lock_flags = 0;
3185 if (!capable(CAP_SYS_ADMIN))
3188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189 dump = ioa_cfg->dump;
3191 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3192 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference so the dump survives while we copy from it unlocked */
3195 kref_get(&dump->kref);
3196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3198 if (off > dump->driver_dump.hdr.len) {
3199 kref_put(&dump->kref, ipr_release_dump);
/* Clamp the window to the total dump length */
3203 if (off + count > dump->driver_dump.hdr.len) {
3204 count = dump->driver_dump.hdr.len - off;
/* Region 1: the driver_dump structure */
3208 if (count && off < sizeof(dump->driver_dump)) {
3209 if (off + count > sizeof(dump->driver_dump))
3210 len = sizeof(dump->driver_dump) - off;
3213 src = (u8 *)&dump->driver_dump + off;
3214 memcpy(buf, src, len);
3220 off -= sizeof(dump->driver_dump);
/* Region 2: the fixed head of ioa_dump, before the page array */
3222 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3223 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3224 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3227 src = (u8 *)&dump->ioa_dump + off;
3228 memcpy(buf, src, len);
3234 off -= offsetof(struct ipr_ioa_dump, ioa_data);
/* Region 3: page-at-a-time copy out of the collected adapter data */
3237 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3238 len = PAGE_ALIGN(off) - off;
3241 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3242 src += off & ~PAGE_MASK;
3243 memcpy(buf, src, len);
3249 kref_put(&dump->kref, ipr_release_dump);
3254 * ipr_alloc_dump - Prepare for adapter dump
3255 * @ioa_cfg: ioa config struct
3258 * 0 on success / other on failure
/*
 * Allocates and registers a dump structure and arms the driver to take a
 * dump (WAIT_FOR_DUMP).  If the adapter is already dead and no dump was
 * taken yet, the worker thread is scheduled to collect one immediately.
 */
3260 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3262 struct ipr_dump *dump;
3263 unsigned long lock_flags = 0;
3265 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3268 ipr_err("Dump memory allocation failed\n");
3272 kref_init(&dump->kref);
3273 dump->ioa_cfg = ioa_cfg;
3275 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A dump is already pending/taken; do not replace it */
3277 if (INACTIVE != ioa_cfg->sdt_state) {
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3283 ioa_cfg->dump = dump;
3284 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3285 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3286 ioa_cfg->dump_taken = 1;
3287 schedule_work(&ioa_cfg->work_q);
3289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3295 * ipr_free_dump - Free adapter dump memory
3296 * @ioa_cfg: ioa config struct
3299 * 0 on success / other on failure
/*
 * Detaches the current dump from the ioa_cfg under the host lock and drops
 * the caller's reference; the memory is freed by ipr_release_dump() once
 * all readers are done.
 */
3301 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3303 struct ipr_dump *dump;
3304 unsigned long lock_flags = 0;
3308 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3309 dump = ioa_cfg->dump;
3311 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315 ioa_cfg->dump = NULL;
3316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3318 kref_put(&dump->kref, ipr_release_dump);
3325 * ipr_write_dump - Setup dump state of adapter
3326 * @kobj: kobject struct
3329 * @count: buffer size
3332 * number of bytes printed to buffer
/*
 * sysfs binary write handler: writing '1' arms/allocates a dump, writing
 * '0' releases the current one.  Requires CAP_SYS_ADMIN.
 */
3334 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3335 loff_t off, size_t count)
3337 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3338 struct Scsi_Host *shost = class_to_shost(cdev);
3339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3342 if (!capable(CAP_SYS_ADMIN))
3346 rc = ipr_alloc_dump(ioa_cfg);
3347 else if (buf[0] == '0')
3348 rc = ipr_free_dump(ioa_cfg);
/* sysfs binary attribute "dump" (owner read/write) */
3358 static struct bin_attribute ipr_dump_attr = {
3361 .mode = S_IRUSR | S_IWUSR,
3364 .read = ipr_read_dump,
3365 .write = ipr_write_dump
/**
 * ipr_free_dump - Stub used when adapter dump support is compiled out.
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 always
 */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3372 * ipr_change_queue_depth - Change the device's queue depth
3373 * @sdev: scsi device struct
3374 * @qdepth: depth to set
/*
 * Clamps the requested depth for SATA (GATA) devices to the per-ATA-LUN
 * limit, applies it via the mid-layer and returns the resulting depth.
 */
3379 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3381 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3382 struct ipr_resource_entry *res;
3383 unsigned long lock_flags = 0;
3385 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3386 res = (struct ipr_resource_entry *)sdev->hostdata;
3388 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3389 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3390 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3392 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3393 return sdev->queue_depth;
3397 * ipr_change_queue_type - Change the device's queue type
3398 * @dsev: scsi device struct
3399 * @tag_type: type of tags to use
3402 * actual queue type set
/*
 * Enables/disables tagged queuing for generic SCSI devices that support it.
 * NOTE(review): excerpt is truncated; comments only, code untouched.
 */
3404 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3407 struct ipr_resource_entry *res;
3408 unsigned long lock_flags = 0;
3410 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411 res = (struct ipr_resource_entry *)sdev->hostdata;
3414 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3416 * We don't bother quiescing the device here since the
3417 * adapter firmware does it for us.
3419 scsi_set_tag_type(sdev, tag_type);
3422 scsi_activate_tcq(sdev, sdev->queue_depth);
3424 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3436 * @dev: device struct
3440 * number of bytes printed to buffer
/* Returns -ENXIO when no resource entry is bound to the scsi_device. */
3442 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3444 struct scsi_device *sdev = to_scsi_device(dev);
3445 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3446 struct ipr_resource_entry *res;
3447 unsigned long lock_flags = 0;
3448 ssize_t len = -ENXIO;
3450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3451 res = (struct ipr_resource_entry *)sdev->hostdata;
3453 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3454 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device sysfs attribute "adapter_handle" */
3458 static struct device_attribute ipr_adapter_handle_attr = {
3460 .name = "adapter_handle",
3463 .show = ipr_show_adapter_handle
/* Per-device sysfs attributes registered via the scsi_host_template */
3466 static struct device_attribute *ipr_dev_attrs[] = {
3467 &ipr_adapter_handle_attr,
3472 * ipr_biosparam - Return the HSC mapping
3473 * @sdev: scsi device struct
3474 * @block_device: block device pointer
3475 * @capacity: capacity of the device
3476 * @parm: Array containing returned HSC values.
3478 * This function generates the HSC parms that fdisk uses.
3479 * We want to make sure we return something that places partitions
3480 * on 4k boundaries for best performance with the IOA.
/*
 * Fixed 128-head / 32-sector geometry; cylinders = capacity / (128 * 32).
 * 128 * 32 * 512B = 2MiB per cylinder keeps partitions 4k-aligned.
 */
3485 static int ipr_biosparam(struct scsi_device *sdev,
3486 struct block_device *block_device,
3487 sector_t capacity, int *parm)
3495 cylinders = capacity;
/* sector_div modifies its first argument in place (64-bit safe divide) */
3496 sector_div(cylinders, (128 * 32));
3501 parm[2] = cylinders;
3507 * ipr_find_starget - Find target based on bus/target.
3508 * @starget: scsi target struct
3511 * resource entry pointer if found / NULL if not found
/*
 * Walks the IOA's used-resource list for an entry matching the target's
 * channel/id at LUN 0.  Caller is expected to hold the host lock (list is
 * also walked under that lock elsewhere in this file) — confirm.  The
 * "return res;" / "return NULL;" lines are missing from this extract.
 */
3513 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3515 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3516 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3517 struct ipr_resource_entry *res;
3519 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3520 if ((res->cfgte.res_addr.bus == starget->channel) &&
3521 (res->cfgte.res_addr.target == starget->id) &&
3522 (res->cfgte.res_addr.lun == 0)) {
/*
 * Forward declaration of the libata port template passed to
 * ata_sas_port_alloc() below; presumably initialized later in the file.
 */
3530 static struct ata_port_info sata_port_info;
3533 * ipr_target_alloc - Prepare for commands to a SCSI target
3534 * @starget: scsi target struct
3536 * If the device is a SATA device, this function allocates an
3537 * ATA port with libata, else it does nothing.
3540 * 0 on success / non-0 on failure
/*
 * NOTE(review): for GATA (SATA-behind-IOA) resources this drops the host
 * lock, allocates an ipr_sata_port with GFP_KERNEL (may sleep, hence the
 * unlock), allocates a libata port, then re-takes the lock and cross-links
 * resource <-> sata_port.  Error-path lines (NULL checks on the kzalloc /
 * ata_sas_port_alloc results, kfree, return codes) are missing from this
 * extract — do not assume they are absent in the real source.
 */
3542 static int ipr_target_alloc(struct scsi_target *starget)
3544 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3545 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3546 struct ipr_sata_port *sata_port;
3547 struct ata_port *ap;
3548 struct ipr_resource_entry *res;
3549 unsigned long lock_flags;
3551 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3552 res = ipr_find_starget(starget);
3553 starget->hostdata = NULL;
3555 if (res && ipr_is_gata(res)) {
/* Drop the spinlock before the sleeping GFP_KERNEL allocation. */
3556 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3557 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3561 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3563 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3564 sata_port->ioa_cfg = ioa_cfg;
3566 sata_port->res = res;
3568 res->sata_port = sata_port;
3569 ap->private_data = sata_port;
3570 starget->hostdata = sata_port;
3576 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582 * ipr_target_destroy - Destroy a SCSI target
3583 * @starget: scsi target struct
3585 * If the device was a SATA device, this function frees the libata
3586 * ATA port, else it does nothing.
/*
 * Counterpart of ipr_target_alloc: tears down the libata port attached to
 * the target.  A NULL check on sata_port and a kfree(sata_port) are
 * presumably among the lines missing from this extract — confirm.
 */
3589 static void ipr_target_destroy(struct scsi_target *starget)
3591 struct ipr_sata_port *sata_port = starget->hostdata;
3594 starget->hostdata = NULL;
3595 ata_sas_port_destroy(sata_port->ap);
3601 * ipr_find_sdev - Find device based on bus/target/lun.
3602 * @sdev: scsi device struct
3605 * resource entry pointer if found / NULL if not found
/*
 * Like ipr_find_starget but matches the full bus/target/lun triple of the
 * scsi_device.  Return statements are missing from this extract; caller
 * presumably holds the host lock while the list is walked.
 */
3607 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3610 struct ipr_resource_entry *res;
3612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3613 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3614 (res->cfgte.res_addr.target == sdev->id) &&
3615 (res->cfgte.res_addr.lun == sdev->lun))
3623 * ipr_slave_destroy - Unconfigure a SCSI device
3624 * @sdev: scsi device struct
/*
 * Midlayer slave_destroy hook: under the host lock, detaches the resource
 * entry from the scsi_device and, for SATA resources, disables the ATA
 * port and clears the res->sata_port back-pointer.  Guard conditions
 * around the ata_port_disable() call are missing from this extract.
 */
3629 static void ipr_slave_destroy(struct scsi_device *sdev)
3631 struct ipr_resource_entry *res;
3632 struct ipr_ioa_cfg *ioa_cfg;
3633 unsigned long lock_flags = 0;
3635 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3638 res = (struct ipr_resource_entry *) sdev->hostdata;
3641 ata_port_disable(res->sata_port->ap);
3642 sdev->hostdata = NULL;
3644 res->sata_port = NULL;
3646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3650 * ipr_slave_configure - Configure a SCSI device
3651 * @sdev: scsi device struct
3653 * This function configures the specified scsi device.
/*
 * Midlayer slave_configure hook.  Per resource type, visible here:
 *  - AF DASD: reported as TYPE_RAID;
 *  - AF DASD / IOA resource: SCSI level forced to 4 and upper-level
 *    driver attach suppressed (no sd/st binding);
 *  - VSET (RAID volume): longer R/W timeout and capped max sectors;
 *  - VSET or SCSI disk: allow_restart enabled for error recovery;
 *  - GATA with a libata port: ATA queue depth + libata slave configure,
 *    otherwise the host's default cmd_per_lun is applied.
 * Guards and the return statement are partially missing in this extract.
 */
3658 static int ipr_slave_configure(struct scsi_device *sdev)
3660 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3661 struct ipr_resource_entry *res;
3662 unsigned long lock_flags = 0;
3664 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3665 res = sdev->hostdata;
3667 if (ipr_is_af_dasd_device(res))
3668 sdev->type = TYPE_RAID;
3669 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3670 sdev->scsi_level = 4;
3671 sdev->no_uld_attach = 1;
3673 if (ipr_is_vset_device(res)) {
3674 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3675 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3677 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3678 sdev->allow_restart = 1;
3679 if (ipr_is_gata(res) && res->sata_port) {
3680 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3681 ata_sas_slave_configure(sdev, res->sata_port->ap);
3683 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3691 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3692 * @sdev: scsi device struct
3694 * This function initializes an ATA port so that future commands
3695 * sent through queuecommand will work.
/*
 * Fetches the ipr_sata_port stashed on the target by ipr_target_alloc and
 * initializes the libata port; on init failure the device is unwound via
 * ipr_slave_destroy.  NULL-check on sata_port and the return of rc are
 * among lines missing from this extract.
 */
3700 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3702 struct ipr_sata_port *sata_port = NULL;
3706 if (sdev->sdev_target)
3707 sata_port = sdev->sdev_target->hostdata;
3709 rc = ata_sas_port_init(sata_port->ap);
3711 ipr_slave_destroy(sdev);
3718 * ipr_slave_alloc - Prepare for commands to a device.
3719 * @sdev: scsi device struct
3721 * This function saves a pointer to the resource entry
3722 * in the scsi device struct if the device exists. We
3723 * can then use this pointer in ipr_queuecommand when
3724 * handling new commands.
3727 * 0 on success / -ENXIO if device does not exist
/*
 * Looks up the resource for this bus/target/lun under the host lock and
 * caches it in sdev->hostdata.  Non-NACA devices are flagged as needing a
 * sync-complete; GATA devices additionally go through
 * ipr_ata_slave_alloc() (after the lock is dropped, since that path may
 * sleep).  The "if (res)" guard and final return are missing here.
 */
3729 static int ipr_slave_alloc(struct scsi_device *sdev)
3731 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3732 struct ipr_resource_entry *res;
3733 unsigned long lock_flags;
3736 sdev->hostdata = NULL;
3738 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3740 res = ipr_find_sdev(sdev);
3745 sdev->hostdata = res;
3746 if (!ipr_is_naca_model(res))
3747 res->needs_sync_complete = 1;
3749 if (ipr_is_gata(res)) {
3750 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3751 return ipr_ata_slave_alloc(sdev);
3755 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3761 * ipr_eh_host_reset - Reset the host adapter
3762 * @scsi_cmd: scsi command struct
/*
 * Core of the eh_host_reset handler (called with the host lock held by
 * the ipr_eh_host_reset wrapper below).  Logs the reset, arms a dump
 * capture if one was being waited for, then performs a blocking
 * reset/reload of the adapter with an abbreviated shutdown.  The return
 * of rc is among the lines missing from this extract.
 */
3767 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3769 struct ipr_ioa_cfg *ioa_cfg;
3773 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3775 dev_err(&ioa_cfg->pdev->dev,
3776 "Adapter being reset as a result of error recovery.\n");
3778 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3779 ioa_cfg->sdt_state = GET_DUMP;
3781 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/*
 * eh_host_reset_handler entry point: takes the host lock around the real
 * work in __ipr_eh_host_reset (the midlayer calls EH handlers unlocked).
 */
3787 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3791 spin_lock_irq(cmd->device->host->host_lock);
3792 rc = __ipr_eh_host_reset(cmd);
3793 spin_unlock_irq(cmd->device->host->host_lock);
3799 * ipr_device_reset - Reset the device
3800 * @ioa_cfg: ioa config struct
3801 * @res: resource entry struct
3803 * This function issues a device reset to the affected device.
3804 * If the device is a SCSI device, a LUN reset will be sent
3805 * to the device first. If that does not work, a target reset
3806 * will be sent. If the device is a SATA device, a PHY reset will
3810 * 0 on success / non-zero on failure
/*
 * Builds an IOA-internal RESET DEVICE command for the resource and sends
 * it synchronously.  For SATA (GATA) resources the CDB requests a PHY
 * reset and ATA status is asked for on good completion; the resulting
 * gata IOASA is copied into the sata_port for libata unless the IOA
 * itself was reset mid-command.  Returns -EIO if the IOASC carries a
 * sense key, 0 otherwise.
 */
3812 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3813 struct ipr_resource_entry *res)
3815 struct ipr_cmnd *ipr_cmd;
3816 struct ipr_ioarcb *ioarcb;
3817 struct ipr_cmd_pkt *cmd_pkt;
3818 struct ipr_ioarcb_ata_regs *regs;
3822 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3823 ioarcb = &ipr_cmd->ioarcb;
3824 cmd_pkt = &ioarcb->cmd_pkt;
3825 regs = &ioarcb->add_data.u.regs;
3827 ioarcb->res_handle = res->cfgte.res_handle;
3828 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3829 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3830 if (ipr_is_gata(res)) {
3831 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3832 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3833 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
/* Blocks until the adapter completes (or times out) the reset. */
3836 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3837 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3838 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3839 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3840 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3841 sizeof(struct ipr_ioasa_gata));
3844 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3848 * ipr_sata_reset - Reset the SATA port
3849 * @ap: SATA port to reset
3850 * @classes: class of the attached device
3852 * This function issues a SATA phy reset to the affected ATA port.
3855 * 0 on success / non-zero on failure
/*
 * libata hardreset callback.  First waits out any in-flight adapter
 * reset/reload (dropping the host lock while sleeping on reset_wait_q),
 * then issues the device reset and classifies the attached device from
 * the resource's protocol field.  A guard ("if (res)") around the reset
 * and the final return are among lines missing from this extract.
 */
3857 static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3858 unsigned long deadline)
3860 struct ipr_sata_port *sata_port = ap->private_data;
3861 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3862 struct ipr_resource_entry *res;
3863 unsigned long lock_flags = 0;
3867 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3868 while(ioa_cfg->in_reset_reload) {
3869 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3870 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3871 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3874 res = sata_port->res;
3876 rc = ipr_device_reset(ioa_cfg, res);
3877 switch(res->cfgte.proto) {
3878 case IPR_PROTO_SATA:
3879 case IPR_PROTO_SAS_STP:
3880 *classes = ATA_DEV_ATA;
3882 case IPR_PROTO_SATA_ATAPI:
3883 case IPR_PROTO_SAS_STP_ATAPI:
3884 *classes = ATA_DEV_ATAPI;
3887 *classes = ATA_DEV_UNKNOWN;
3892 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3898 * ipr_eh_dev_reset - Reset the device
3899 * @scsi_cmd: scsi command struct
3901 * This function issues a device reset to the affected device.
3902 * A LUN reset will be sent to the device first. If that does
3903 * not work, a target reset will be sent.
/*
 * Core of the eh_device_reset handler (host lock held by the wrapper).
 * Fails fast if the adapter is mid-reset or dead.  For every command
 * pending on this resource it swaps the done callback to the appropriate
 * EH-done variant, and fails any associated ATA qc so libata EH sees it.
 * SATA devices are reset through libata's ata_do_eh (lock dropped across
 * it); plain SCSI devices via ipr_device_reset.  resetting_device
 * suppresses bus-reset reporting in ipr_erp_start during the reset.
 */
3908 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3910 struct ipr_cmnd *ipr_cmd;
3911 struct ipr_ioa_cfg *ioa_cfg;
3912 struct ipr_resource_entry *res;
3913 struct ata_port *ap;
3917 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3918 res = scsi_cmd->device->hostdata;
3924 * If we are currently going through reset/reload, return failed. This will force the
3925 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3928 if (ioa_cfg->in_reset_reload)
3930 if (ioa_cfg->ioa_is_dead)
3933 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3934 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3935 if (ipr_cmd->scsi_cmd)
3936 ipr_cmd->done = ipr_scsi_eh_done;
3938 ipr_cmd->done = ipr_sata_eh_done;
3939 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3940 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3941 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3946 res->resetting_device = 1;
3947 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3949 if (ipr_is_gata(res) && res->sata_port) {
3950 ap = res->sata_port->ap;
/* ata_do_eh may sleep; must not hold the host lock across it. */
3951 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3952 ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3953 spin_lock_irq(scsi_cmd->device->host->host_lock);
3955 rc = ipr_device_reset(ioa_cfg, res);
3956 res->resetting_device = 0;
3959 return (rc ? FAILED : SUCCESS);
/*
 * eh_device_reset_handler entry point: wraps __ipr_eh_dev_reset with the
 * host lock, mirroring ipr_eh_host_reset above.
 */
3962 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3966 spin_lock_irq(cmd->device->host->host_lock);
3967 rc = __ipr_eh_dev_reset(cmd);
3968 spin_unlock_irq(cmd->device->host->host_lock);
3974 * ipr_bus_reset_done - Op done function for bus reset.
3975 * @ipr_cmd: ipr command struct
3977 * This function is the op done function for a bus reset
/*
 * Completion callback for the bus reset issued by ipr_abort_timeout.
 * Reports the bus reset to the midlayer for the matching resource, then
 * either clears the sibling link (abort still outstanding) or invokes
 * the paired abort command's done function to wake the sleeping EH
 * thread, and finally returns this command block to the free queue.
 */
3982 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3984 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3985 struct ipr_resource_entry *res;
3988 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3989 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3990 sizeof(res->cfgte.res_handle))) {
3991 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3997 * If abort has not completed, indicate the reset has, else call the
3998 * abort's done function to wake the sleeping eh thread
4000 if (ipr_cmd->sibling->sibling)
4001 ipr_cmd->sibling->sibling = NULL;
4003 ipr_cmd->sibling->done(ipr_cmd->sibling);
4005 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4010 * ipr_abort_timeout - An abort task has timed out
4011 * @ipr_cmd: ipr command struct
4013 * This function handles when an abort task times out. If this
4014 * happens we issue a bus reset since we have resources tied
4015 * up that must be freed before returning to the midlayer.
/*
 * Timer callback for a timed-out CANCEL ALL.  Bails out if the abort
 * already completed or the adapter is mid-reset.  Otherwise escalates by
 * sending a bus reset against the same resource handle, cross-linking
 * the two commands via ->sibling so ipr_bus_reset_done can coordinate
 * with the still-sleeping abort.
 */
4020 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4022 struct ipr_cmnd *reset_cmd;
4023 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4024 struct ipr_cmd_pkt *cmd_pkt;
4025 unsigned long lock_flags = 0;
4028 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4029 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4030 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4034 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4035 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4036 ipr_cmd->sibling = reset_cmd;
4037 reset_cmd->sibling = ipr_cmd;
4038 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4039 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4040 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4041 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4042 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4044 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4045 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4050 * ipr_cancel_op - Cancel specified op
4051 * @scsi_cmd: scsi command struct
4053 * This function cancels specified op.
/*
 * Abort path for a single SCSI command (caller holds the host lock).
 * Fails fast during reset/reload or on a dead/non-GSCSI resource; if the
 * target command is still pending, its done callback is redirected to
 * ipr_scsi_eh_done before a blocking CANCEL ALL REQUESTS is sent (with
 * ipr_abort_timeout armed to escalate to a bus reset).  A bus-reset or
 * sync-required IOASC means the abort effectively succeeded.  Guards such
 * as "if (!found)" are among the lines missing from this extract.
 */
4058 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4060 struct ipr_cmnd *ipr_cmd;
4061 struct ipr_ioa_cfg *ioa_cfg;
4062 struct ipr_resource_entry *res;
4063 struct ipr_cmd_pkt *cmd_pkt;
4068 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4069 res = scsi_cmd->device->hostdata;
4071 /* If we are currently going through reset/reload, return failed.
4072 * This will force the mid-layer to call ipr_eh_host_reset,
4073 * which will then go to sleep and wait for the reset to complete
4075 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4077 if (!res || !ipr_is_gscsi(res))
4080 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4081 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4082 ipr_cmd->done = ipr_scsi_eh_done;
4091 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4092 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4093 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4094 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4095 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4096 ipr_cmd->u.sdev = scsi_cmd->device;
4098 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4100 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4101 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4104 * If the abort task timed out and we sent a bus reset, we will get
4105 * one the following responses to the abort
4107 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4112 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4113 if (!ipr_is_naca_model(res))
4114 res->needs_sync_complete = 1;
4117 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4121 * ipr_eh_abort - Abort a single op
4122 * @scsi_cmd: scsi command struct
/*
 * eh_abort_handler entry point: takes the host lock around ipr_cancel_op
 * (the midlayer calls EH handlers unlocked); return of rc is missing
 * from this extract.
 */
4127 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4129 unsigned long flags;
4134 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4135 rc = ipr_cancel_op(scsi_cmd);
4136 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4143 * ipr_handle_other_interrupt - Handle "other" interrupts
4144 * @ioa_cfg: ioa config struct
4145 * @int_reg: interrupt register
4148 * IRQ_NONE / IRQ_HANDLED
/*
 * Handles non-HRRQ interrupt causes, called from ipr_isr with the host
 * lock held.  A transition-to-operational interrupt is masked/cleared and
 * the stalled reset job is kicked forward; anything else is treated as a
 * fatal condition: note a unit check, log a permanent failure, arm a dump
 * if one is being waited for, mask everything and start an adapter reset.
 * Branch structure is partially obscured by missing lines in this extract.
 */
4150 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4151 volatile u32 int_reg)
4153 irqreturn_t rc = IRQ_HANDLED;
4155 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4156 /* Mask the interrupt */
4157 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4159 /* Clear the interrupt */
4160 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
/* Read back to flush the posted writes before proceeding. */
4161 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4163 list_del(&ioa_cfg->reset_cmd->queue);
4164 del_timer(&ioa_cfg->reset_cmd->timer);
4165 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4167 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4168 ioa_cfg->ioa_unit_checked = 1;
4170 dev_err(&ioa_cfg->pdev->dev,
4171 "Permanent IOA failure. 0x%08X\n", int_reg);
4173 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4174 ioa_cfg->sdt_state = GET_DUMP;
4176 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4177 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4184 * ipr_isr - Interrupt service routine
4186 * @devp: pointer to ioa config struct
4189 * IRQ_NONE / IRQ_HANDLED
/*
 * Main interrupt handler.  Under the host lock it: ignores interrupts
 * while disabled or when no adapter interrupt bit is set; drains the Host
 * Request/Response Queue by toggle bit, completing each finished command
 * (validating the response handle — an invalid one forces an adapter
 * reset); advances the circular HRRQ pointer, flipping the toggle bit on
 * wrap; clears the PCI HRRQ-updated interrupt after progress; and defers
 * any remaining non-HRRQ cause to ipr_handle_other_interrupt.  The outer
 * retry loop brace structure is partially missing from this extract.
 */
4191 static irqreturn_t ipr_isr(int irq, void *devp)
4193 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4194 unsigned long lock_flags = 0;
4195 volatile u32 int_reg, int_mask_reg;
4198 struct ipr_cmnd *ipr_cmd;
4199 irqreturn_t rc = IRQ_NONE;
4201 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4203 /* If interrupts are disabled, ignore the interrupt */
4204 if (!ioa_cfg->allow_interrupts) {
4205 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4209 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4210 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4212 /* If an interrupt on the adapter did not occur, ignore it */
4213 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* HRRQ entry is ours while its toggle bit matches the driver's. */
4221 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4222 ioa_cfg->toggle_bit) {
4224 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4225 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4227 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4228 ioa_cfg->errors_logged++;
4229 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4231 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4232 ioa_cfg->sdt_state = GET_DUMP;
4234 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4239 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4241 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4243 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4245 list_del(&ipr_cmd->queue);
4246 del_timer(&ipr_cmd->timer);
4247 ipr_cmd->done(ipr_cmd);
4251 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4252 ioa_cfg->hrrq_curr++;
4254 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
/* Wrapped around: flip the toggle bit we expect on the next pass. */
4255 ioa_cfg->toggle_bit ^= 1u;
4259 if (ipr_cmd != NULL) {
4260 /* Clear the PCI interrupt */
4261 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4262 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4267 if (unlikely(rc == IRQ_NONE))
4268 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4275 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4276 * @ioa_cfg: ioa config struct
4277 * @ipr_cmd: ipr command struct
4280 * 0 on success / -1 on failure
/*
 * Translates the midlayer's data buffer into the adapter's IOADL
 * (I/O Address Descriptor List).  Two paths, both pre-scsi-data-accessor
 * era:
 *  - scatter/gather (scsi_cmd->use_sg): pci_map_sg the list, set
 *    read/write direction flags and lengths in the IOARCB, copy each
 *    sg element into an IOADL descriptor (inlined into the IOARCB's
 *    add_data when it fits), and mark the last descriptor with LAST;
 *  - single buffer: pci_map_single and build a one-entry inline IOADL.
 * Success/failure returns are among lines missing from this extract.
 */
4282 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4283 struct ipr_cmnd *ipr_cmd)
4286 struct scatterlist *sglist;
4288 u32 ioadl_flags = 0;
4289 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4290 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4291 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4293 length = scsi_cmd->request_bufflen;
4298 if (scsi_cmd->use_sg) {
4299 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4300 scsi_cmd->request_buffer,
4302 scsi_cmd->sc_data_direction);
4304 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4305 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4306 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4307 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4308 ioarcb->write_ioadl_len =
4309 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4310 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4311 ioadl_flags = IPR_IOADL_FLAGS_READ;
4312 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4313 ioarcb->read_ioadl_len =
4314 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4317 sglist = scsi_cmd->request_buffer;
/* Small lists fit directly in the IOARCB, saving a fetch by the IOA. */
4319 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4320 ioadl = ioarcb->add_data.u.ioadl;
4321 ioarcb->write_ioadl_addr =
4322 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4323 offsetof(struct ipr_ioarcb, add_data));
4324 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4327 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4328 ioadl[i].flags_and_data_len =
4329 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4331 cpu_to_be32(sg_dma_address(&sglist[i]));
4334 if (likely(ipr_cmd->dma_use_sg)) {
4335 ioadl[i-1].flags_and_data_len |=
4336 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4339 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4341 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4342 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4343 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4344 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4345 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4346 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4347 ioadl_flags = IPR_IOADL_FLAGS_READ;
4348 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4349 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4352 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4353 scsi_cmd->request_buffer, length,
4354 scsi_cmd->sc_data_direction);
4356 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4357 ioadl = ioarcb->add_data.u.ioadl;
4358 ioarcb->write_ioadl_addr =
4359 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4360 offsetof(struct ipr_ioarcb, add_data));
4361 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4362 ipr_cmd->dma_use_sg = 1;
4363 ioadl[0].flags_and_data_len =
4364 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4365 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4368 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4375 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4376 * @scsi_cmd: scsi command struct
/*
 * Maps the midlayer's tag message (simple / head-of-queue / ordered)
 * onto the IOA's task-attribute flag bits; untagged is the default.
 * The switch-on-tag line, break statements, and final return are
 * missing from this extract.
 */
4381 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4384 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4386 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4388 case MSG_SIMPLE_TAG:
4389 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4392 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4394 case MSG_ORDERED_TAG:
4395 rc = IPR_FLAGS_LO_ORDERED_TASK;
4404 * ipr_erp_done - Process completion of ERP for a device
4405 * @ipr_cmd: ipr command struct
4407 * This function copies the sense buffer into the scsi_cmd
4408 * struct and pushes the scsi_done function.
/*
 * Final step of the error-recovery chain: if the request sense itself
 * failed, mark the command DID_ERROR; otherwise copy the gathered sense
 * data into the scsi_cmnd.  Unmaps DMA, frees the command block and
 * completes the command to the midlayer.  A guard around the
 * needs_sync_complete update ("if (res)") is missing from this extract.
 */
4413 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4415 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4416 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4417 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4418 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4420 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4421 scsi_cmd->result |= (DID_ERROR << 16);
4422 scmd_printk(KERN_ERR, scsi_cmd,
4423 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4425 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4426 SCSI_SENSE_BUFFERSIZE);
4430 if (!ipr_is_naca_model(res))
4431 res->needs_sync_complete = 1;
4434 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4435 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4436 scsi_cmd->scsi_done(scsi_cmd);
4440 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4441 * @ipr_cmd: ipr command struct
/*
 * Scrubs a just-completed command block for reuse within the ERP chain:
 * zeroes the command packet, transfer lengths, IOADL lengths and residual
 * count, and points both IOADL addresses back at the block's own inline
 * ioadl array (computed from the IOARCB's bus address).
 */
4446 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4448 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4449 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4450 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4452 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4453 ioarcb->write_data_transfer_length = 0;
4454 ioarcb->read_data_transfer_length = 0;
4455 ioarcb->write_ioadl_len = 0;
4456 ioarcb->read_ioadl_len = 0;
4458 ioasa->residual_data_len = 0;
4459 ioarcb->write_ioadl_addr =
4460 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4461 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4465 * ipr_erp_request_sense - Send request sense to a device
4466 * @ipr_cmd: ipr command struct
4468 * This function sends a request sense to a device as a result
4469 * of a check condition.
/*
 * Second ERP stage: if the preceding step already failed with a sense
 * key, short-circuit to ipr_erp_done.  Otherwise rebuild the command as
 * a REQUEST SENSE CDB with sync-override and no underlength check,
 * pointing a single read IOADL at the command block's dedicated sense
 * DMA buffer, and chain to ipr_erp_done on completion.  The timeout is
 * doubled to cover the adapter-side retry.
 */
4474 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4476 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4477 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4479 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4480 ipr_erp_done(ipr_cmd);
4484 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4486 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4487 cmd_pkt->cdb[0] = REQUEST_SENSE;
4488 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4489 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4490 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4491 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4493 ipr_cmd->ioadl[0].flags_and_data_len =
4494 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4495 ipr_cmd->ioadl[0].address =
4496 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4498 ipr_cmd->ioarcb.read_ioadl_len =
4499 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4500 ipr_cmd->ioarcb.read_data_transfer_length =
4501 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4503 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4504 IPR_REQUEST_SENSE_TIMEOUT * 2);
4508 * ipr_erp_cancel_all - Send cancel all to a device
4509 * @ipr_cmd: ipr command struct
4511 * This function sends a cancel all to a device to clear the
4512 * queue. If we are running TCQ on the device, QERR is set to 1,
4513 * which means all outstanding ops have been dropped on the floor.
4514 * Cancel all will return them to us.
/*
 * First ERP stage for tagged devices: untagged devices skip straight to
 * the request sense; tagged ones first issue CANCEL ALL REQUESTS to
 * recover the ops QERR dropped, then chain to ipr_erp_request_sense.
 */
4519 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4521 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4522 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4523 struct ipr_cmd_pkt *cmd_pkt;
4527 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4529 if (!scsi_get_tag_type(scsi_cmd->device)) {
4530 ipr_erp_request_sense(ipr_cmd);
4534 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4535 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4536 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4538 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4539 IPR_CANCEL_ALL_TIMEOUT);
4543 * ipr_dump_ioasa - Dump contents of IOASA
4544 * @ioa_cfg: ioa config struct
4545 * @ipr_cmd: ipr command struct
4546 * @res: resource entry struct
4548 * This function is invoked by the interrupt handler when ops
4549 * fail. It will log the IOASA if appropriate. Only called
/*
 * Logs the IOA Status Area for a failed op, gated on the adapter's
 * log_level: below the default level nothing is logged; below max level
 * several suppression rules apply (IOA already logged it, non-GSCSI
 * resource, error marked non-loggable in ipr_error_table).  For
 * bus-reset IOASCs with a failing-device IOASC, the error text is looked
 * up from the latter.  The hex dump walks the IOASA as big-endian words,
 * clamped to the structure size, four words per line.
 */
4555 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4556 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4560 u32 ioasc, fd_ioasc;
4561 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4562 __be32 *ioasa_data = (__be32 *)ioasa;
4565 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4566 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4571 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4574 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4575 error_index = ipr_get_error(fd_ioasc);
4577 error_index = ipr_get_error(ioasc);
4579 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4580 /* Don't log an error if the IOA already logged one */
4581 if (ioasa->ilid != 0)
4584 if (!ipr_is_gscsi(res))
4587 if (ipr_error_table[error_index].log_ioasa == 0)
4591 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4593 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4594 data_len = sizeof(struct ipr_ioasa);
4596 data_len = be16_to_cpu(ioasa->ret_stat_len);
4598 ipr_err("IOASA Dump:\n");
4600 for (i = 0; i < data_len / 4; i += 4) {
4601 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4602 be32_to_cpu(ioasa_data[i]),
4603 be32_to_cpu(ioasa_data[i+1]),
4604 be32_to_cpu(ioasa_data[i+2]),
4605 be32_to_cpu(ioasa_data[i+3]));
4610 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4612 * @sense_buf: sense data buffer
/*
 * Synthesizes SCSI sense data in the command's sense buffer from the
 * IOASA, and sets CHECK CONDITION status.  Driver-generated IOASCs are
 * ignored.  Two formats are built:
 *  - descriptor format (0x72) with an information descriptor carrying the
 *    full 64-bit failing LBA, used for VSET media errors whose LBA needs
 *    more than 32 bits;
 *  - fixed format (0x70) otherwise, filling sense key / ASC / ASCQ from
 *    the IOASC, a field pointer for illegal requests (0xC0 if the IOARCB
 *    was at fault, 0x80 for bad parameter data), and a 32-bit failing
 *    LBA with the Valid bit for media do-not-reallocate errors.
 * Some byte assignments are missing from this extract.
 */
4617 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4620 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4621 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4622 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4623 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4625 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4627 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4630 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4632 if (ipr_is_vset_device(res) &&
4633 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4634 ioasa->u.vset.failing_lba_hi != 0) {
4635 sense_buf[0] = 0x72;
4636 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4637 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4638 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4642 sense_buf[9] = 0x0A;
4643 sense_buf[10] = 0x80;
4645 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4647 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4648 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4649 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4650 sense_buf[15] = failing_lba & 0x000000ff;
4652 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4654 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4655 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4656 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4657 sense_buf[19] = failing_lba & 0x000000ff;
4659 sense_buf[0] = 0x70;
4660 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4661 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4662 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4664 /* Illegal request */
4665 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4666 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4667 sense_buf[7] = 10; /* additional length */
4669 /* IOARCB was in error */
4670 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4671 sense_buf[15] = 0xC0;
4672 else /* Parameter data was invalid */
4673 sense_buf[15] = 0x80;
4676 ((IPR_FIELD_POINTER_MASK &
4677 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4679 (IPR_FIELD_POINTER_MASK &
4680 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4682 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4683 if (ipr_is_vset_device(res))
4684 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4686 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4688 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4689 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4690 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4691 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4692 sense_buf[6] = failing_lba & 0x000000ff;
4695 sense_buf[7] = 6; /* additional length */
4701 * ipr_get_autosense - Copy autosense data to sense buffer
4702 * @ipr_cmd: ipr command struct
4704 * This function copies the autosense buffer to the buffer
4705 * in the scsi_cmd, if there is autosense available.
4708 * 1 if autosense was available / 0 if not
/*
 * Checks the IOASC-specific AUTOSENSE_VALID bit; if set, copies the
 * adapter-provided sense data into the scsi_cmnd's sense buffer,
 * clamped to SCSI_SENSE_BUFFERSIZE.  The bare "return 0;" / "return 1;"
 * lines are missing from this extract.
 */
4710 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4712 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4714 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4717 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4718 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4719 SCSI_SENSE_BUFFERSIZE));
4724 * ipr_erp_start - Process an error response for a SCSI op
4725 * @ioa_cfg: ioa config struct
4726 * @ipr_cmd: ipr command struct
4728 * This function determines whether or not to initiate ERP
4729 * on the affected device.
/*
 * Central error dispatcher for failed SCSI ops.  Non-GSCSI errors (other
 * than raw device bus status) get synthesized sense data; the IOASA is
 * logged; then the masked IOASC selects the midlayer disposition:
 * retries, no-connect, bus-reset reporting (suppressed while we are the
 * ones resetting), or — for a CHECK CONDITION without autosense on a
 * non-NACA device — kicking off the cancel-all/request-sense ERP chain,
 * which completes the command itself.  The default path maps any real
 * sense key to DID_ERROR.  Finally the sglist is unmapped and the
 * command completed.  Some guards/breaks are missing in this extract.
 */
4734 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4735 struct ipr_cmnd *ipr_cmd)
4737 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4738 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4739 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4740 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4743 ipr_scsi_eh_done(ipr_cmd);
4747 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4748 ipr_gen_sense(ipr_cmd);
4750 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4752 switch (masked_ioasc) {
4753 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4754 if (ipr_is_naca_model(res))
4755 scsi_cmd->result |= (DID_ABORT << 16);
4757 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4759 case IPR_IOASC_IR_RESOURCE_HANDLE:
4760 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4761 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4763 case IPR_IOASC_HW_SEL_TIMEOUT:
4764 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4765 if (!ipr_is_naca_model(res))
4766 res->needs_sync_complete = 1;
4768 case IPR_IOASC_SYNC_REQUIRED:
4770 res->needs_sync_complete = 1;
4771 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4773 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4774 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4775 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4777 case IPR_IOASC_BUS_WAS_RESET:
4778 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4780 * Report the bus reset and ask for a retry. The device
4781 * will give CC/UA the next command.
4783 if (!res->resetting_device)
4784 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4785 scsi_cmd->result |= (DID_ERROR << 16);
4786 if (!ipr_is_naca_model(res))
4787 res->needs_sync_complete = 1;
4789 case IPR_IOASC_HW_DEV_BUS_STATUS:
4790 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4791 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4792 if (!ipr_get_autosense(ipr_cmd)) {
4793 if (!ipr_is_naca_model(res)) {
/* ERP chain takes ownership; it will complete the command itself. */
4794 ipr_erp_cancel_all(ipr_cmd);
4799 if (!ipr_is_naca_model(res))
4800 res->needs_sync_complete = 1;
4802 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4805 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4806 scsi_cmd->result |= (DID_ERROR << 16);
4807 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4808 res->needs_sync_complete = 1;
4812 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4813 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4814 scsi_cmd->scsi_done(scsi_cmd);
4818 * ipr_scsi_done - mid-layer done function
4819 * @ipr_cmd: ipr command struct
4821 * This function is invoked by the interrupt handler for
4822 * ops generated by the SCSI mid-layer
4827 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4829 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4830 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4831 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Report any residual (untransferred) byte count to the mid-layer. */
4833 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
/* Fast path: no error - unmap, free the command block and complete.
 * Any non-zero sense key is routed through ipr_erp_start() instead. */
4835 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4836 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4837 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4838 scsi_cmd->scsi_done(scsi_cmd);
4840 ipr_erp_start(ioa_cfg, ipr_cmd);
4844 * ipr_queuecommand - Queue a mid-layer request
4845 * @scsi_cmd: scsi command struct
4846 * @done: done function
4848 * This function queues a request generated by the mid-layer.
4852 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4853 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4855 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4856 void (*done) (struct scsi_cmnd *))
4858 struct ipr_ioa_cfg *ioa_cfg;
4859 struct ipr_resource_entry *res;
4860 struct ipr_ioarcb *ioarcb;
4861 struct ipr_cmnd *ipr_cmd;
4864 scsi_cmd->scsi_done = done;
4865 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4866 res = scsi_cmd->device->hostdata;
4867 scsi_cmd->result = (DID_OK << 16);
4870 * We are currently blocking all devices due to a host reset
4871 * We have told the host to stop giving us new requests, but
4872 * ERP ops don't count. FIXME
4874 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4875 return SCSI_MLQUEUE_HOST_BUSY;
4878 * FIXME - Create scsi_set_host_offline interface
4879 * and the ioa_is_dead check can be removed
4881 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
/* Adapter is dead or device unknown: fail the command immediately. */
4882 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4883 scsi_cmd->result = (DID_NO_CONNECT << 16);
4884 scsi_cmd->scsi_done(scsi_cmd);
/* SATA devices behind a SAS port are handed off to libata. */
4888 if (ipr_is_gata(res) && res->sata_port)
4889 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4891 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4892 ioarcb = &ipr_cmd->ioarcb;
4893 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
/* Build the IOARCB: copy the CDB and fill in handle/done/trace info. */
4895 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4896 ipr_cmd->scsi_cmd = scsi_cmd;
4897 ioarcb->res_handle = res->cfgte.res_handle;
4898 ipr_cmd->done = ipr_scsi_done;
4899 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4901 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4902 if (scsi_cmd->underflow == 0)
4903 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* One-shot flag set by ERP: piggy-back a SYNC COMPLETE on this op. */
4905 if (res->needs_sync_complete) {
4906 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4907 res->needs_sync_complete = 0;
4910 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4911 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4912 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4913 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-unique opcodes (>= 0xC0) are adapter commands, not SCSI CDBs. */
4916 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4917 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4918 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4920 if (likely(rc == 0))
4921 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Kick the adapter by writing the IOARCB address to the inbound ring. */
4923 if (likely(rc == 0)) {
4925 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4926 ioa_cfg->regs.ioarrin_reg);
/* IOADL build failed: put the command back and ask for a retry. */
4928 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4929 return SCSI_MLQUEUE_HOST_BUSY;
4936 * ipr_ioctl - IOCTL handler
4937 * @sdev: scsi device struct
4942 * 0 on success / other on failure
4944 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4946 struct ipr_resource_entry *res;
4948 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Only SATA (GATA) devices are handled here, via libata's ioctl path. */
4949 if (res && ipr_is_gata(res))
4950 return ata_scsi_ioctl(sdev, cmd, arg);
4956 * ipr_info - Get information about the card/driver
4957 * @scsi_host: scsi host struct
4960 * pointer to buffer with description string
4962 static const char * ipr_ioa_info(struct Scsi_Host *host)
/* NOTE(review): static buffer shared across calls; only the sprintf is
 * serialized by host_lock, so the returned string may be overwritten by
 * a concurrent call for another host. */
4964 static char buffer[512];
4965 struct ipr_ioa_cfg *ioa_cfg;
4966 unsigned long lock_flags = 0;
4968 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4970 spin_lock_irqsave(host->host_lock, lock_flags);
4971 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4972 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI mid-layer host template: wires this driver's entry points and
 * queueing limits into the SCSI core. */
4977 static struct scsi_host_template driver_template = {
4978 .module = THIS_MODULE,
4980 .info = ipr_ioa_info,
4982 .queuecommand = ipr_queuecommand,
4983 .eh_abort_handler = ipr_eh_abort,
4984 .eh_device_reset_handler = ipr_eh_dev_reset,
4985 .eh_host_reset_handler = ipr_eh_host_reset,
4986 .slave_alloc = ipr_slave_alloc,
4987 .slave_configure = ipr_slave_configure,
4988 .slave_destroy = ipr_slave_destroy,
4989 .target_alloc = ipr_target_alloc,
4990 .target_destroy = ipr_target_destroy,
4991 .change_queue_depth = ipr_change_queue_depth,
4992 .change_queue_type = ipr_change_queue_type,
4993 .bios_param = ipr_biosparam,
4994 .can_queue = IPR_MAX_COMMANDS,
4996 .sg_tablesize = IPR_MAX_SGLIST,
4997 .max_sectors = IPR_IOA_MAX_SECTORS,
4998 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4999 .use_clustering = ENABLE_CLUSTERING,
5000 .shost_attrs = ipr_ioa_attrs,
5001 .sdev_attrs = ipr_dev_attrs,
5002 .proc_name = IPR_NAME
5006 * ipr_ata_phy_reset - libata phy_reset handler
5007 * @ap: ata port to reset
5010 static void ipr_ata_phy_reset(struct ata_port *ap)
5012 unsigned long flags;
5013 struct ipr_sata_port *sata_port = ap->private_data;
5014 struct ipr_resource_entry *res = sata_port->res;
5015 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* Wait (dropping the host lock while sleeping) for any in-progress
 * adapter reset/reload to finish before touching the device. */
5019 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5020 while(ioa_cfg->in_reset_reload) {
5021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5022 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5023 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5026 if (!ioa_cfg->allow_cmds)
5029 rc = ipr_device_reset(ioa_cfg, res);
/* Reset failed: take the port offline. */
5032 ap->ops->port_disable(ap);
/* Classify the attached device from the config-table protocol field. */
5036 switch(res->cfgte.proto) {
5037 case IPR_PROTO_SATA:
5038 case IPR_PROTO_SAS_STP:
5039 ap->device[0].class = ATA_DEV_ATA;
5041 case IPR_PROTO_SATA_ATAPI:
5042 case IPR_PROTO_SAS_STP_ATAPI:
5043 ap->device[0].class = ATA_DEV_ATAPI;
5046 ap->device[0].class = ATA_DEV_UNKNOWN;
5047 ap->ops->port_disable(ap);
5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5057 * ipr_ata_post_internal - Cleanup after an internal command
5058 * @qc: ATA queued command
5063 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5065 struct ipr_sata_port *sata_port = qc->ap->private_data;
5066 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5067 struct ipr_cmnd *ipr_cmd;
5068 unsigned long flags;
/* Block until any adapter reset/reload completes (lock dropped while
 * sleeping, then re-taken and the condition re-checked). */
5070 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5071 while(ioa_cfg->in_reset_reload) {
5072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5073 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5074 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* If the internal command is still pending on the adapter, reset the
 * device to abort it. */
5077 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5078 if (ipr_cmd->qc == qc) {
5079 ipr_device_reset(ioa_cfg, sata_port->res);
5083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5087 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5089 * @tf: destination ATA taskfile
5094 static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5096 struct ipr_sata_port *sata_port = ap->private_data;
/* Taskfile state is reconstructed from the last IOASA GATA snapshot
 * cached on the sata_port (saved in ipr_sata_done). */
5097 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5099 tf->feature = g->error;
5100 tf->nsect = g->nsect;
5104 tf->device = g->device;
5105 tf->command = g->status;
5106 tf->hob_nsect = g->hob_nsect;
5107 tf->hob_lbal = g->hob_lbal;
5108 tf->hob_lbam = g->hob_lbam;
5109 tf->hob_lbah = g->hob_lbah;
5110 tf->ctl = g->alt_status;
5114 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5115 * @regs: destination
5116 * @tf: source ATA taskfile
5121 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5122 struct ata_taskfile *tf)
/* Straight field-by-field copy of the libata taskfile into the IOARCB
 * ATA register image the adapter consumes. */
5124 regs->feature = tf->feature;
5125 regs->nsect = tf->nsect;
5126 regs->lbal = tf->lbal;
5127 regs->lbam = tf->lbam;
5128 regs->lbah = tf->lbah;
5129 regs->device = tf->device;
5130 regs->command = tf->command;
5131 regs->hob_feature = tf->hob_feature;
5132 regs->hob_nsect = tf->hob_nsect;
5133 regs->hob_lbal = tf->hob_lbal;
5134 regs->hob_lbam = tf->hob_lbam;
5135 regs->hob_lbah = tf->hob_lbah;
5136 regs->ctl = tf->ctl;
5140 * ipr_sata_done - done function for SATA commands
5141 * @ipr_cmd: ipr command struct
5143 * This function is invoked by the interrupt handler for
5144 * ops generated by the SCSI mid-layer to SATA devices
5149 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5151 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5152 struct ata_queued_cmd *qc = ipr_cmd->qc;
5153 struct ipr_sata_port *sata_port = qc->ap->private_data;
5154 struct ipr_resource_entry *res = sata_port->res;
5155 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Cache the GATA status so ipr_tf_read() can reconstruct the taskfile. */
5157 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5158 sizeof(struct ipr_ioasa_gata));
5159 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5161 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5162 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5163 res->cfgte.res_addr.target);
/* On failure force the error bit in (__ac_err_mask); otherwise derive
 * the mask from the device status alone. */
5165 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5166 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5168 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5169 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5170 ata_qc_complete(qc);
5174 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5175 * @ipr_cmd: ipr command struct
5176 * @qc: ATA queued command
5179 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5180 struct ata_queued_cmd *qc)
5182 u32 ioadl_flags = 0;
5183 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5184 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
/* Total transfer length includes any DMA padding libata appended. */
5185 int len = qc->nbytes + qc->pad_len;
5186 struct scatterlist *sg;
/* Direction decides which half of the IOARCB (read vs write) carries
 * the transfer length and IOADL descriptor count. */
5191 if (qc->dma_dir == DMA_TO_DEVICE) {
5192 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5193 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5194 ioarcb->write_data_transfer_length = cpu_to_be32(len);
5195 ioarcb->write_ioadl_len =
5196 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5197 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5198 ioadl_flags = IPR_IOADL_FLAGS_READ;
5199 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5200 ioarcb->read_ioadl_len =
5201 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Emit one IOADL descriptor per S/G element; the final descriptor is
 * tagged LAST so the adapter knows where the list ends. */
5204 ata_for_each_sg(sg, qc) {
5205 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5206 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5207 if (ata_sg_is_last(sg, qc))
5208 ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5215 * ipr_qc_issue - Issue a SATA qc to a device
5216 * @qc: queued command
5221 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5223 struct ata_port *ap = qc->ap;
5224 struct ipr_sata_port *sata_port = ap->private_data;
5225 struct ipr_resource_entry *res = sata_port->res;
5226 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5227 struct ipr_cmnd *ipr_cmd;
5228 struct ipr_ioarcb *ioarcb;
5229 struct ipr_ioarcb_ata_regs *regs;
/* Refuse new work while the adapter is resetting or dead. */
5231 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5232 return AC_ERR_SYSTEM;
5234 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5235 ioarcb = &ipr_cmd->ioarcb;
5236 regs = &ioarcb->add_data.u.regs;
/* The ATA register image travels in the IOARCB additional-data area. */
5238 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5239 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5241 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5243 ipr_cmd->done = ipr_sata_done;
5244 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5245 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5246 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5247 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* Account for the extra S/G element libata uses for DMA padding. */
5248 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5250 ipr_build_ata_ioadl(ipr_cmd, qc);
5251 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5252 ipr_copy_sata_tf(regs, &qc->tf);
5253 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5254 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
/* Translate the libata protocol into adapter flag bits (DMA transfer
 * and/or packet command); unknown protocols are rejected. */
5256 switch (qc->tf.protocol) {
5257 case ATA_PROT_NODATA:
5262 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5265 case ATA_PROT_ATAPI:
5266 case ATA_PROT_ATAPI_NODATA:
5267 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5270 case ATA_PROT_ATAPI_DMA:
5271 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5272 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5277 return AC_ERR_INVALID;
/* Hand the IOARCB to the adapter via the inbound ring register. */
5281 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5282 ioa_cfg->regs.ioarrin_reg);
5287 * ipr_ata_check_status - Return last ATA status
/* Returns the device status byte cached from the last completed op. */
5293 static u8 ipr_ata_check_status(struct ata_port *ap)
5295 struct ipr_sata_port *sata_port = ap->private_data;
5296 return sata_port->ioasa.status;
5300 * ipr_ata_check_altstatus - Return last ATA altstatus
/* Returns the alternate status byte cached from the last completed op. */
5306 static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5308 struct ipr_sata_port *sata_port = ap->private_data;
5309 return sata_port->ioasa.alt_status;
/* libata port operations: mixes generic libata helpers with the ipr
 * adapter-mediated handlers defined above. */
5312 static struct ata_port_operations ipr_sata_ops = {
5313 .port_disable = ata_port_disable,
5314 .check_status = ipr_ata_check_status,
5315 .check_altstatus = ipr_ata_check_altstatus,
5316 .dev_select = ata_noop_dev_select,
5317 .phy_reset = ipr_ata_phy_reset,
5318 .post_internal_cmd = ipr_ata_post_internal,
5319 .tf_read = ipr_tf_read,
5320 .qc_prep = ata_noop_qc_prep,
5321 .qc_issue = ipr_qc_issue,
5322 .port_start = ata_sas_port_start,
5323 .port_stop = ata_sas_port_stop
/* Capabilities advertised to libata for SATA ports on this adapter. */
5326 static struct ata_port_info sata_port_info = {
5327 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5328 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5329 .pio_mask = 0x10, /* pio4 */
5331 .udma_mask = 0x7f, /* udma0-6 */
5332 .port_ops = &ipr_sata_ops
5335 #ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early 5702 adapters are blocked. */
5336 static const u16 ipr_blocked_processors[] = {
5348 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5349 * @ioa_cfg: ioa cfg struct
5351 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5352 * certain pSeries hardware. This function determines if the given
5353 * adapter is in one of these configurations or not.
5356 * 1 if adapter is not supported / 0 if adapter is supported
5358 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5363 if (ioa_cfg->type == 0x5702) {
5364 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5365 &rev_id) == PCIBIOS_SUCCESSFUL) {
5367 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5368 if (__is_processor(ipr_blocked_processors[i]))
/* On non-pSeries builds no combination is blocked. */
5377 #define ipr_invalid_adapter(ioa_cfg) 0
5381 * ipr_ioa_bringdown_done - IOA bring down completion.
5382 * @ipr_cmd: ipr command struct
5384 * This function processes the completion of an adapter bring down.
5385 * It wakes any reset sleepers.
5390 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5395 ioa_cfg->in_reset_reload = 0;
5396 ioa_cfg->reset_retries = 0;
5397 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5398 wake_up_all(&ioa_cfg->reset_wait_q);
/* scsi_unblock_requests() may sleep/re-enter; drop the host lock
 * around it and re-take it afterwards. */
5400 spin_unlock_irq(ioa_cfg->host->host_lock);
5401 scsi_unblock_requests(ioa_cfg->host);
5402 spin_lock_irq(ioa_cfg->host->host_lock);
5405 return IPR_RC_JOB_RETURN;
5409 * ipr_ioa_reset_done - IOA reset completion.
5410 * @ipr_cmd: ipr command struct
5412 * This function processes the completion of an adapter reset.
5413 * It schedules any necessary mid-layer add/removes and
5414 * wakes any reset sleepers.
5419 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5421 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5422 struct ipr_resource_entry *res;
5423 struct ipr_hostrcb *hostrcb, *temp;
/* Reset finished: allow new commands and mark runtime resets. */
5427 ioa_cfg->in_reset_reload = 0;
5428 ioa_cfg->allow_cmds = 1;
5429 ioa_cfg->reset_cmd = NULL;
5430 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Schedule the worker to add/remove any devices that changed while
 * the adapter was down. */
5432 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5433 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5438 schedule_work(&ioa_cfg->work_q);
/* Re-arm the host-controlled async messages (HCAMs): error log first,
 * then configuration-change notifications. */
5440 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5441 list_del(&hostrcb->queue);
5442 if (i++ < IPR_NUM_LOG_HCAMS)
5443 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5445 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5448 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5449 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5451 ioa_cfg->reset_retries = 0;
5452 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5453 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around scsi_unblock_requests(), then re-block if
 * another reset started in the meantime. */
5455 spin_unlock_irq(ioa_cfg->host->host_lock);
5456 scsi_unblock_requests(ioa_cfg->host);
5457 spin_lock_irq(ioa_cfg->host->host_lock);
5459 if (!ioa_cfg->allow_cmds)
5460 scsi_block_requests(ioa_cfg->host);
5463 return IPR_RC_JOB_RETURN;
5467 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5468 * @supported_dev: supported device struct
5469 * @vpids: vendor product id struct
5474 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5475 struct ipr_std_inq_vpids *vpids)
/* Zero the whole record, then fill in the single vendor/product entry. */
5477 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5478 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5479 supported_dev->num_records = 1;
5480 supported_dev->data_length =
5481 cpu_to_be16(sizeof(struct ipr_supported_device));
5482 supported_dev->reserved = 0;
5486 * ipr_set_supported_devs - Send Set Supported Devices for a device
5487 * @ipr_cmd: ipr command struct
5489 * This function send a Set Supported Devices to the adapter
5492 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5494 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5496 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5497 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5498 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5499 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5500 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step; overridden below while devices remain. */
5502 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume iteration from the resource saved in ipr_cmd->u.res, sending
 * one Set Supported Devices per SCSI disk. */
5504 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5505 if (!ipr_is_scsi_disk(res))
5508 ipr_cmd->u.res = res;
5509 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5511 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5512 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5513 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5515 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5516 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5517 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
/* Single-element IOADL pointing at the supp_dev area of the shared
 * VPD control block DMA buffer. */
5519 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5520 sizeof(struct ipr_supported_device));
5521 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5522 offsetof(struct ipr_misc_cbs, supp_dev));
5523 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5524 ioarcb->write_data_transfer_length =
5525 cpu_to_be32(sizeof(struct ipr_supported_device));
5527 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5528 IPR_SET_SUP_DEVICE_TIMEOUT);
/* Re-enter this step for the next disk when the command completes. */
5530 ipr_cmd->job_step = ipr_set_supported_devs;
5531 return IPR_RC_JOB_RETURN;
5534 return IPR_RC_JOB_CONTINUE;
5538 * ipr_setup_write_cache - Disable write cache if needed
5539 * @ipr_cmd: ipr command struct
5541 * This function sets up adapters write cache to desired setting
5544 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5546 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Prime the next job step and the starting resource for it. */
5550 ipr_cmd->job_step = ipr_set_supported_devs;
5551 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5552 struct ipr_resource_entry, queue);
/* Nothing to do unless the cache is configured off. */
5554 if (ioa_cfg->cache_state != CACHE_DISABLED)
5555 return IPR_RC_JOB_CONTINUE;
/* A "prepare for normal shutdown" disables the write cache. */
5557 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5558 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5559 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5560 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5562 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5564 return IPR_RC_JOB_RETURN;
5568 * ipr_get_mode_page - Locate specified mode page
5569 * @mode_pages: mode page buffer
5570 * @page_code: page code to find
5571 * @len: minimum required length for mode page
5574 * pointer to mode page / NULL on failure
5576 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5577 u32 page_code, u32 len)
5579 struct ipr_mode_page_hdr *mode_hdr;
5583 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Remaining page data = total length minus the 4-byte mode parameter
 * header and the block descriptors. */
5586 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5587 mode_hdr = (struct ipr_mode_page_hdr *)
5588 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk page headers until the requested code is found with at least
 * the required payload length. */
5591 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5592 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5596 page_length = (sizeof(struct ipr_mode_page_hdr) +
5597 mode_hdr->page_length);
5598 length -= page_length;
5599 mode_hdr = (struct ipr_mode_page_hdr *)
5600 ((unsigned long)mode_hdr + page_length);
5607 * ipr_check_term_power - Check for term power errors
5608 * @ioa_cfg: ioa config struct
5609 * @mode_pages: IOAFP mode pages buffer
5611 * Check the IOAFP's mode page 28 for term power errors
5616 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5617 struct ipr_mode_pages *mode_pages)
5621 struct ipr_dev_bus_entry *bus;
5622 struct ipr_mode_page28 *mode_page;
5624 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5625 sizeof(struct ipr_mode_page28));
5627 entry_length = mode_page->entry_length;
5629 bus = mode_page->bus;
/* Scan every bus entry and log any bus missing termination power. */
5631 for (i = 0; i < mode_page->num_entries; i++) {
5632 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5633 dev_err(&ioa_cfg->pdev->dev,
5634 "Term power is absent on scsi bus %d\n",
/* Entries are variable-sized; advance by the reported entry length. */
5638 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5643 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5644 * @ioa_cfg: ioa config struct
5646 * Looks through the config table checking for SES devices. If
5647 * the SES device is in the SES table indicating a maximum SCSI
5648 * bus speed, the speed is limited for the bus.
5653 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
/* Lower each bus' max transfer rate to the SES-imposed ceiling, never
 * raise it. */
5658 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5659 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5660 ioa_cfg->bus_attr[i].bus_width);
5662 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5663 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5668 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5669 * @ioa_cfg: ioa config struct
5670 * @mode_pages: mode page 28 buffer
5672 * Updates mode page 28 based on driver configuration
5677 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5678 struct ipr_mode_pages *mode_pages)
5680 int i, entry_length;
5681 struct ipr_dev_bus_entry *bus;
5682 struct ipr_bus_attributes *bus_attr;
5683 struct ipr_mode_page28 *mode_page;
5685 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5686 sizeof(struct ipr_mode_page28));
5688 entry_length = mode_page->entry_length;
5690 /* Loop for each device bus entry */
5691 for (i = 0, bus = mode_page->bus;
5692 i < mode_page->num_entries;
5693 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
/* Sanity-check the bus number reported by the adapter. */
5694 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5695 dev_err(&ioa_cfg->pdev->dev,
5696 "Invalid resource address reported: 0x%08X\n",
5697 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver-configured width, rate and QAS setting into the
 * page so a subsequent MODE SELECT applies them. */
5701 bus_attr = &ioa_cfg->bus_attr[i];
5702 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5703 bus->bus_width = bus_attr->bus_width;
5704 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5705 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5706 if (bus_attr->qas_enabled)
5707 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5709 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5714 * ipr_build_mode_select - Build a mode select command
5715 * @ipr_cmd: ipr command struct
5716 * @res_handle: resource handle to send command to
5717 * @parm: Byte 2 of Mode Sense command
5718 * @dma_addr: DMA buffer address
5719 * @xfer_len: data transfer length
5724 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5725 __be32 res_handle, u8 parm, u32 dma_addr,
5728 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5729 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* MODE SELECT(6): parm lands in byte 1, allocation length in byte 4. */
5731 ioarcb->res_handle = res_handle;
5732 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5733 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5734 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5735 ioarcb->cmd_pkt.cdb[1] = parm;
5736 ioarcb->cmd_pkt.cdb[4] = xfer_len;
/* Single write IOADL element covering the whole transfer. */
5738 ioadl->flags_and_data_len =
5739 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5740 ioadl->address = cpu_to_be32(dma_addr);
5741 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5742 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5746 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5747 * @ipr_cmd: ipr command struct
5749 * This function sets up the SCSI bus attributes and sends
5750 * a Mode Select for Page 28 to activate them.
5755 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5757 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5758 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Apply speed limits / term-power checks / bus attributes to the
 * previously sensed page 28 before selecting it back. */
5762 ipr_scsi_bus_speed_limit(ioa_cfg);
5763 ipr_check_term_power(ioa_cfg, mode_pages);
5764 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* MODE SELECT requires the mode data length field to be zero. */
5765 length = mode_pages->hdr.length + 1;
5766 mode_pages->hdr.length = 0;
5768 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5769 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5772 ipr_cmd->job_step = ipr_setup_write_cache;
5773 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5776 return IPR_RC_JOB_RETURN;
5780 * ipr_build_mode_sense - Builds a mode sense command
5781 * @ipr_cmd: ipr command struct
5782 * @res: resource entry struct
5783 * @parm: Byte 2 of mode sense command
5784 * @dma_addr: DMA address of mode sense buffer
5785 * @xfer_len: Size of DMA buffer
5790 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5792 u8 parm, u32 dma_addr, u8 xfer_len)
5794 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5795 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* MODE SENSE(6): page code in byte 2, allocation length in byte 4. */
5797 ioarcb->res_handle = res_handle;
5798 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5799 ioarcb->cmd_pkt.cdb[2] = parm;
5800 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5801 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
/* Single read IOADL element covering the whole transfer. */
5803 ioadl->flags_and_data_len =
5804 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5805 ioadl->address = cpu_to_be32(dma_addr);
5806 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5807 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5811 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5812 * @ipr_cmd: ipr command struct
5814 * This function handles the failure of an IOA bringup command.
5819 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5821 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5822 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5824 dev_err(&ioa_cfg->pdev->dev,
5825 "0x%02X failed with IOASC: 0x%08X\n",
5826 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
/* A bringup command failed: restart the whole adapter reset and
 * return the command block to the free list. */
5828 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5829 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5830 return IPR_RC_JOB_RETURN;
5834 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5835 * @ipr_cmd: ipr command struct
5837 * This function handles the failure of a Mode Sense to the IOAFP.
5838 * Some adapters do not handle all mode pages.
5841 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5843 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5845 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* "Invalid request" just means the page is unsupported - skip straight
 * to the write-cache step instead of failing the reset. */
5847 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5848 ipr_cmd->job_step = ipr_setup_write_cache;
5849 return IPR_RC_JOB_CONTINUE;
/* Anything else is a real failure. */
5852 return ipr_reset_cmd_failed(ipr_cmd);
5856 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5857 * @ipr_cmd: ipr command struct
5859 * This function send a Page 28 mode sense to the IOA to
5860 * retrieve SCSI bus attributes.
5865 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Sense page 0x28 into the shared VPD control-block DMA buffer. */
5870 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5871 0x28, ioa_cfg->vpd_cbs_dma +
5872 offsetof(struct ipr_misc_cbs, mode_pages),
5873 sizeof(struct ipr_mode_pages));
/* On success continue with mode select; unsupported-page failures are
 * tolerated by the failure handler. */
5875 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5876 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5878 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5881 return IPR_RC_JOB_RETURN;
5885 * ipr_init_res_table - Initialize the resource table
5886 * @ipr_cmd: ipr command struct
5888 * This function looks through the existing resource table, comparing
5889 * it with the config table. This function will take care of old/new
5890 * devices and schedule adding/removing them from the mid-layer
5894 * IPR_RC_JOB_CONTINUE
5896 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5899 struct ipr_resource_entry *res, *temp;
5900 struct ipr_config_table_entry *cfgte;
5905 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5906 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park all currently-known resources on old_res; matched entries are
 * moved back, leftovers are stale devices. */
5908 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5909 list_move_tail(&res->queue, &old_res);
5911 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5912 cfgte = &ioa_cfg->cfg_table->dev[i];
/* Match config-table entries to known resources by resource address. */
5915 list_for_each_entry_safe(res, temp, &old_res, queue) {
5916 if (!memcmp(&res->cfgte.res_addr,
5917 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5918 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5925 if (list_empty(&ioa_cfg->free_res_q)) {
5926 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
/* New device: allocate a resource entry from the free list. */
5931 res = list_entry(ioa_cfg->free_res_q.next,
5932 struct ipr_resource_entry, queue);
5933 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5934 ipr_init_res_entry(res);
/* Refresh the cached config-table entry for this resource. */
5939 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
/* Anything left on old_res disappeared from the config table: either
 * schedule mid-layer removal or recycle the entry immediately. */
5942 list_for_each_entry_safe(res, temp, &old_res, queue) {
5944 res->del_from_ml = 1;
5945 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5946 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5948 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5952 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5955 return IPR_RC_JOB_CONTINUE;
5959 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5960 * @ipr_cmd: ipr command struct
5962 * This function sends a Query IOA Configuration command
5963 * to the adapter to retrieve the IOA configuration table.
5968 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5970 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5971 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5972 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
/* Page 3 inquiry data was fetched by the previous job step. */
5973 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5976 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5977 ucode_vpd->major_release, ucode_vpd->card_type,
5978 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5979 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5980 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5982 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
/* CDB bytes 7/8 carry the allocation length, MSB first. */
5983 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5984 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
/* Single-element read IOADL pointing at the DMA-able config table. */
5986 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5987 ioarcb->read_data_transfer_length =
5988 cpu_to_be32(sizeof(struct ipr_config_table));
5990 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5991 ioadl->flags_and_data_len =
5992 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
/* When the table arrives, reconcile it with our resource entries. */
5994 ipr_cmd->job_step = ipr_init_res_table;
5996 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5999 return IPR_RC_JOB_RETURN;
6003 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6004 * @ipr_cmd: ipr command struct
6006 * This utility function sends an inquiry to the adapter.
6011 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6012 u32 dma_addr, u8 xfer_len)
6014 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6015 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6018 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6019 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* INQUIRY CDB: byte 1 = caller-supplied flags (callers pass 1 to request
 * a VPD page -- presumably the EVPD bit, per SCSI INQUIRY), byte 2 = page
 * code, byte 4 = allocation length. */
6021 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6022 ioarcb->cmd_pkt.cdb[1] = flags;
6023 ioarcb->cmd_pkt.cdb[2] = page;
6024 ioarcb->cmd_pkt.cdb[4] = xfer_len;
/* Single-element read IOADL into the caller's DMA buffer. */
6026 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6027 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6029 ioadl->address = cpu_to_be32(dma_addr);
6030 ioadl->flags_and_data_len =
6031 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
/* Completion re-enters the reset job via ipr_reset_ioa_job. */
6033 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6038 * ipr_inquiry_page_supported - Is the given inquiry page supported
6039 * @page0: inquiry page 0 buffer
6042 * This function determines if the specified inquiry page is supported.
6045 * 1 if page is supported / 0 if not
6047 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
/* Scan the supported-pages list from INQUIRY page 0.  The reported length
 * is clamped to the buffer capacity so a bogus length from the adapter
 * cannot run past the array.  (Return statements are on elided lines.) */
6051 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6052 if (page0->page[i] == page)
6059 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6060 * @ipr_cmd: ipr command struct
6062 * This function sends a Page 3 inquiry to the adapter
6063 * to retrieve software VPD information.
6066 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6068 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6070 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6071 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
/* If the adapter does not support VPD page 1, record that it has no
 * manageable cache. */
6075 if (!ipr_inquiry_page_supported(page0, 1))
6076 ioa_cfg->cache_state = CACHE_NONE;
6078 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* VPD inquiry for page 3 (software/microcode VPD). */
6080 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6081 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6082 sizeof(struct ipr_inquiry_page3));
6085 return IPR_RC_JOB_RETURN;
6089 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6090 * @ipr_cmd: ipr command struct
6092 * This function sends a Page 0 inquiry to the adapter
6093 * to retrieve supported inquiry pages.
6096 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6098 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6105 /* Grab the type out of the VPD and store it away */
6106 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* First four product-id characters are parsed as the adapter type, hex. */
6108 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6110 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
/* VPD inquiry for page 0 (list of supported inquiry pages). */
6112 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6113 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6114 sizeof(struct ipr_inquiry_page0));
6117 return IPR_RC_JOB_RETURN;
6121 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6122 * @ipr_cmd: ipr command struct
6124 * This function sends a standard inquiry to the adapter.
6129 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6134 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
/* Standard (non-VPD) inquiry of the IOA itself into the VPD staging area. */
6136 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6137 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6138 sizeof(struct ipr_ioa_vpd));
6141 return IPR_RC_JOB_RETURN;
6145 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6146 * @ipr_cmd: ipr command struct
6148 * This function send an Identify Host Request Response Queue
6149 * command to establish the HRRQ with the adapter.
/* Note: "indentify" misspelling is the actual identifier; callers use it. */
6154 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6157 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6160 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6162 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6163 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6165 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* CDB bytes 2-5: host RRQ bus address, most-significant byte first. */
6166 ioarcb->cmd_pkt.cdb[2] =
6167 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6168 ioarcb->cmd_pkt.cdb[3] =
6169 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6170 ioarcb->cmd_pkt.cdb[4] =
6171 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6172 ioarcb->cmd_pkt.cdb[5] =
6173 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
/* CDB bytes 7-8: RRQ size in bytes (one u32 slot per command block). */
6174 ioarcb->cmd_pkt.cdb[7] =
6175 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6176 ioarcb->cmd_pkt.cdb[8] =
6177 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6179 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6181 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6184 return IPR_RC_JOB_RETURN;
6188 * ipr_reset_timer_done - Adapter reset timer function
6189 * @ipr_cmd: ipr command struct
6191 * Description: This function is used in adapter reset processing
6192 * for timing events. If the reset_cmd pointer in the IOA
6193 * config struct is not this adapter's we are doing nested
6194 * resets and fail_all_ops will take care of freeing the
6200 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6203 unsigned long lock_flags = 0;
6205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only advance the job if this is still the active reset command; a
 * nested reset owns the cleanup otherwise (see kernel-doc above). */
6207 if (ioa_cfg->reset_cmd == ipr_cmd) {
6208 list_del(&ipr_cmd->queue);
6209 ipr_cmd->done(ipr_cmd);
6212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6216 * ipr_reset_start_timer - Start a timer for adapter reset job
6217 * @ipr_cmd: ipr command struct
6218 * @timeout: timeout value
6220 * Description: This function is used in adapter reset processing
6221 * for timing events. If the reset_cmd pointer in the IOA
6222 * config struct is not this adapter's we are doing nested
6223 * resets and fail_all_ops will take care of freeing the
6229 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6230 unsigned long timeout)
/* Queue on pending_q so ipr_fail_all_ops can find the command if a nested
 * reset supersedes this one while the timer is armed. */
6232 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6233 ipr_cmd->done = ipr_reset_ioa_job;
/* The timer callback (ipr_reset_timer_done) resumes the reset job. */
6235 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6236 ipr_cmd->timer.expires = jiffies + timeout;
6237 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6238 add_timer(&ipr_cmd->timer);
6242 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6243 * @ioa_cfg: ioa cfg struct
6248 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6250 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6252 /* Initialize Host RRQ pointers */
6253 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6254 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6255 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6256 ioa_cfg->toggle_bit = 1;
6258 /* Zero out config table */
6259 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6263 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6264 * @ipr_cmd: ipr command struct
6266 * This function reinitializes some control blocks and
6267 * enables destructive diagnostics on the adapter.
6272 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6274 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6275 volatile u32 int_reg;
6278 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6279 ipr_init_ioa_mem(ioa_cfg);
/* From here on the ISR is allowed to process adapter interrupts. */
6281 ioa_cfg->allow_interrupts = 1;
6282 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Adapter is already operational: unmask error/HRRQ interrupts and run
 * the next job step synchronously. */
6284 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6285 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6286 ioa_cfg->regs.clr_interrupt_mask_reg);
6287 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6288 return IPR_RC_JOB_CONTINUE;
6291 /* Enable destructive diagnostics on IOA */
6292 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6294 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
/* Read back -- presumably to flush the posted MMIO writes; TODO confirm. */
6295 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6297 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* Arm the transition-to-operational timeout; the job resumes from either
 * the interrupt path or ipr_oper_timeout, whichever fires first. */
6299 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6300 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6301 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6302 ipr_cmd->done = ipr_reset_ioa_job;
6303 add_timer(&ipr_cmd->timer);
6304 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6307 return IPR_RC_JOB_RETURN;
6311 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6312 * @ipr_cmd: ipr command struct
6314 * This function is invoked when an adapter dump has run out
6315 * of processing time.
6318 * IPR_RC_JOB_CONTINUE
6320 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6322 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6324 if (ioa_cfg->sdt_state == GET_DUMP)
6325 ioa_cfg->sdt_state = ABORT_DUMP;
6327 ipr_cmd->job_step = ipr_reset_alert;
6329 return IPR_RC_JOB_CONTINUE;
6333 * ipr_unit_check_no_data - Log a unit check/no data error log
6334 * @ioa_cfg: ioa config struct
6336 * Logs an error indicating the adapter unit checked, but for some
6337 * reason, we were unable to fetch the unit check buffer.
6342 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6344 ioa_cfg->errors_logged++;
6345 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6349 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6350 * @ioa_cfg: ioa config struct
6352 * Fetches the unit check buffer from the adapter by clocking the data
6353 * through the mailbox register.
6358 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6360 unsigned long mailbox;
6361 struct ipr_hostrcb *hostrcb;
6362 struct ipr_uc_sdt sdt;
/* After a unit check the mailbox register points at the smart dump table. */
6366 mailbox = readl(ioa_cfg->ioa_mailbox);
6368 if (!ipr_sdt_is_fmt2(mailbox)) {
6369 ipr_unit_check_no_data(ioa_cfg);
/* Clock the SDT header and entries out of adapter memory. */
6373 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6374 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6375 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
/* Bail out unless the SDT is ready and its first entry is valid. */
6377 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6378 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6379 ipr_unit_check_no_data(ioa_cfg);
6383 /* Find length of the first sdt entry (UC buffer) */
6384 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6385 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
/* Borrow a free hostrcb to hold the unit check buffer. */
6387 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6388 struct ipr_hostrcb, queue);
6389 list_del(&hostrcb->queue);
6390 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6392 rc = ipr_get_ldump_data_section(ioa_cfg,
6393 be32_to_cpu(sdt.entry[0].bar_str_offset),
6394 (__be32 *)&hostrcb->hcam,
6395 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
/* NOTE(review): an elided check on rc selects between the log path below
 * and the no-data path. */
6398 ipr_handle_log_data(ioa_cfg, hostrcb);
6399 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
/* An IOASC demanding a reset while a dump is queued means: dump first. */
6400 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6401 ioa_cfg->sdt_state == GET_DUMP)
6402 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6404 ipr_unit_check_no_data(ioa_cfg);
/* Return the borrowed hostrcb to the free queue. */
6406 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6410 * ipr_reset_restore_cfg_space - Restore PCI config space.
6411 * @ipr_cmd: ipr command struct
6413 * Description: This function restores the saved PCI config space of
6414 * the adapter, fails all outstanding ops back to the callers, and
6415 * fetches the dump/unit check if applicable to this reset.
6418 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6420 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Restore PCI config space saved before the reset. */
6426 rc = pci_restore_state(ioa_cfg->pdev);
6428 if (rc != PCIBIOS_SUCCESSFUL) {
6429 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6430 return IPR_RC_JOB_CONTINUE;
6433 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6434 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6435 return IPR_RC_JOB_CONTINUE;
/* Everything that was in flight at reset time is failed back now. */
6438 ipr_fail_all_ops(ioa_cfg);
/* A pending unit check must be fetched (and the IOA re-alerted) before
 * normal bring-up can proceed. */
6440 if (ioa_cfg->ioa_unit_checked) {
6441 ioa_cfg->ioa_unit_checked = 0;
6442 ipr_get_unit_check_buffer(ioa_cfg);
6443 ipr_cmd->job_step = ipr_reset_alert;
6444 ipr_reset_start_timer(ipr_cmd, 0);
6445 return IPR_RC_JOB_RETURN;
/* Choose the next step: finish bringdown, or re-enable the adapter. */
6448 if (ioa_cfg->in_ioa_bringdown) {
6449 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6451 ipr_cmd->job_step = ipr_reset_enable_ioa;
/* A dump request takes priority over re-enabling the adapter. */
6453 if (GET_DUMP == ioa_cfg->sdt_state) {
6454 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6455 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6456 schedule_work(&ioa_cfg->work_q);
6457 return IPR_RC_JOB_RETURN;
6462 return IPR_RC_JOB_CONTINUE;
6466 * ipr_reset_bist_done - BIST has completed on the adapter.
6467 * @ipr_cmd: ipr command struct
6469 * Description: Unblock config space and resume the reset process.
6472 * IPR_RC_JOB_CONTINUE
6474 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
/* BIST delay has elapsed; user config space access may resume. */
6477 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6478 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6480 return IPR_RC_JOB_CONTINUE;
6484 * ipr_reset_start_bist - Run BIST on the adapter.
6485 * @ipr_cmd: ipr command struct
6487 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6490 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6492 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Keep user space out of config space while BIST runs. */
6498 pci_block_user_cfg_access(ioa_cfg->pdev);
6499 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
/* Could not start BIST: unblock, flag a PCI access error, and let the
 * job continue so the failure handler runs. */
6501 if (rc != PCIBIOS_SUCCESSFUL) {
6502 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6503 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6504 rc = IPR_RC_JOB_CONTINUE;
/* NOTE(review): elided else -- BIST started; wait out the delay on a timer. */
6506 ipr_cmd->job_step = ipr_reset_bist_done;
6507 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6508 rc = IPR_RC_JOB_RETURN;
6516 * ipr_reset_allowed - Query whether or not IOA can be reset
6517 * @ioa_cfg: ioa config struct
6520 * 0 if reset not allowed / non-zero if reset is allowed
6522 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6524 volatile u32 temp_reg;
6526 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6527 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6531 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6532 * @ipr_cmd: ipr command struct
6534 * Description: This function waits for adapter permission to run BIST,
6535 * then runs BIST. If the adapter does not give permission after a
6536 * reasonable time, we will reset the adapter anyway. The impact of
6537 * resetting the adapter without warning the adapter is the risk of
6538 * losing the persistent error log on the adapter. If the adapter is
6539 * reset while it is writing to the flash on the adapter, the flash
6540 * segment will have bad ECC and be zeroed.
6543 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6545 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6547 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6548 int rc = IPR_RC_JOB_RETURN;
/* Not yet safe to reset and time budget remains: poll again later. */
6550 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6551 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6552 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* NOTE(review): elided else -- either permitted or out of time; run BIST
 * now (see kernel-doc above for why we reset anyway). */
6554 ipr_cmd->job_step = ipr_reset_start_bist;
6555 rc = IPR_RC_JOB_CONTINUE;
6562 * ipr_reset_alert - Alert the adapter of a pending reset
6563 * @ipr_cmd: ipr command struct
6565 * Description: This function alerts the adapter that it will be reset.
6566 * If memory space is not currently enabled, proceed directly
6567 * to running BIST on the adapter. The timer must always be started
6568 * so we guarantee we do not run BIST from ipr_isr.
6573 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6575 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6580 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
/* Memory space usable: warn the adapter via RESET_ALERT, then poll for
 * permission to run BIST. */
6582 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6583 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6584 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6585 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* NOTE(review): elided else -- MMIO unusable; skip the alert, go straight
 * to BIST. */
6587 ipr_cmd->job_step = ipr_reset_start_bist;
/* The timer is always started so BIST never runs from interrupt context
 * (see kernel-doc above). */
6590 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6591 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6594 return IPR_RC_JOB_RETURN;
6598 * ipr_reset_ucode_download_done - Microcode download completion
6599 * @ipr_cmd: ipr command struct
6601 * Description: This function unmaps the microcode download buffer.
6604 * IPR_RC_JOB_CONTINUE
6606 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6608 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6609 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* Download complete: release the DMA mapping of the microcode buffer. */
6611 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6612 sglist->num_sg, DMA_TO_DEVICE);
6614 ipr_cmd->job_step = ipr_reset_alert;
6615 return IPR_RC_JOB_CONTINUE;
6619 * ipr_reset_ucode_download - Download microcode to the adapter
6620 * @ipr_cmd: ipr command struct
6622 * Description: This function checks to see if it there is microcode
6623 * to download to the adapter. If there is, a download is performed.
6626 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6628 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6630 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6631 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* NOTE(review): an elided guard (no staged microcode) takes this early out. */
6634 ipr_cmd->job_step = ipr_reset_alert;
6637 return IPR_RC_JOB_CONTINUE;
/* SCSI WRITE BUFFER in download-and-save mode; CDB bytes 6-8 carry the
 * buffer length, MSB first. */
6639 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6640 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6641 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6642 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6643 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6644 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6645 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
/* Build the scatter/gather IOADL for the microcode image. */
6647 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6648 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6650 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6651 IPR_WRITE_BUFFER_TIMEOUT);
6654 return IPR_RC_JOB_RETURN;
6658 * ipr_reset_shutdown_ioa - Shutdown the adapter
6659 * @ipr_cmd: ipr command struct
6661 * Description: This function issues an adapter shutdown of the
6662 * specified type to the specified adapter as part of the
6663 * adapter reset job.
6666 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6668 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6671 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6672 unsigned long timeout;
6673 int rc = IPR_RC_JOB_CONTINUE;
/* Only issue a shutdown when one was requested and the IOA is alive. */
6676 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6677 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6678 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6679 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6680 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Timeout scales with how thorough the requested shutdown is. */
6682 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6683 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6684 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6685 timeout = IPR_INTERNAL_TIMEOUT;
6687 timeout = IPR_SHUTDOWN_TIMEOUT;
6689 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6691 rc = IPR_RC_JOB_RETURN;
6692 ipr_cmd->job_step = ipr_reset_ucode_download;
/* NOTE(review): elided else -- nothing to shut down; alert the IOA directly. */
6694 ipr_cmd->job_step = ipr_reset_alert;
6701 * ipr_reset_ioa_job - Adapter reset job
6702 * @ipr_cmd: ipr command struct
6704 * Description: This function is the job router for the adapter reset job.
6709 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6712 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6715 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Superseded by a nested reset: just recycle this command block. */
6717 if (ioa_cfg->reset_cmd != ipr_cmd) {
6719 * We are doing nested adapter resets and this is
6720 * not the current reset job.
6722 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* The previous step failed; its failure handler decides whether to stop. */
6726 if (IPR_IOASC_SENSE_KEY(ioasc)) {
6727 rc = ipr_cmd->job_step_failed(ipr_cmd);
6728 if (rc == IPR_RC_JOB_RETURN)
/* Run successive job steps until one returns IPR_RC_JOB_RETURN, i.e.
 * until a step parks the job waiting for an async completion. */
6732 ipr_reinit_ipr_cmnd(ipr_cmd);
6733 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6734 rc = ipr_cmd->job_step(ipr_cmd);
6735 } while(rc == IPR_RC_JOB_CONTINUE);
6739 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6740 * @ioa_cfg: ioa config struct
6741 * @job_step: first job step of reset job
6742 * @shutdown_type: shutdown type
6744 * Description: This function will initiate the reset of the given adapter
6745 * starting at the selected job step.
6746 * If the caller needs to wait on the completion of the reset,
6747 * the caller must sleep on the reset_wait_q.
6752 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6753 int (*job_step) (struct ipr_cmnd *),
6754 enum ipr_shutdown_type shutdown_type)
6756 struct ipr_cmnd *ipr_cmd;
/* Fence off new commands while the reset job owns the adapter. */
6758 ioa_cfg->in_reset_reload = 1;
6759 ioa_cfg->allow_cmds = 0;
6760 scsi_block_requests(ioa_cfg->host);
/* Dedicate one command block to the reset job and mark it current. */
6762 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6763 ioa_cfg->reset_cmd = ipr_cmd;
6764 ipr_cmd->job_step = job_step;
6765 ipr_cmd->u.shutdown_type = shutdown_type;
/* Kick the job state machine off at the first step. */
6767 ipr_reset_ioa_job(ipr_cmd);
6771 * ipr_initiate_ioa_reset - Initiate an adapter reset
6772 * @ioa_cfg: ioa config struct
6773 * @shutdown_type: shutdown type
6775 * Description: This function will initiate the reset of the given adapter.
6776 * If the caller needs to wait on the completion of the reset,
6777 * the caller must sleep on the reset_wait_q.
6782 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6783 enum ipr_shutdown_type shutdown_type)
/* NOTE(review): elided early return -- a dead IOA is never reset again. */
6785 if (ioa_cfg->ioa_is_dead)
/* A reset arriving while a dump fetch is pending cancels the dump. */
6788 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6789 ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many back-to-back resets: give up and take the adapter offline. */
6791 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6792 dev_err(&ioa_cfg->pdev->dev,
6793 "IOA taken offline - error recovery failed\n");
6795 ioa_cfg->reset_retries = 0;
6796 ioa_cfg->ioa_is_dead = 1;
/* Already in bringdown: finish the teardown by hand and wake waiters. */
6798 if (ioa_cfg->in_ioa_bringdown) {
6799 ioa_cfg->reset_cmd = NULL;
6800 ioa_cfg->in_reset_reload = 0;
6801 ipr_fail_all_ops(ioa_cfg);
6802 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around scsi_unblock_requests -- it can re-enter
 * the request path. */
6804 spin_unlock_irq(ioa_cfg->host->host_lock);
6805 scsi_unblock_requests(ioa_cfg->host);
6806 spin_lock_irq(ioa_cfg->host->host_lock);
/* NOTE(review): elided return/else -- dead but not yet in bringdown
 * falls through to a final bringdown reset with no shutdown command. */
6809 ioa_cfg->in_ioa_bringdown = 1;
6810 shutdown_type = IPR_SHUTDOWN_NONE;
6814 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6819 * ipr_reset_freeze - Hold off all I/O activity
6820 * @ipr_cmd: ipr command struct
6822 * Description: If the PCI slot is frozen, hold off all I/O
6823 * activity; then, as soon as the slot is available again,
6824 * initiate an adapter reset.
6826 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6828 /* Disallow new interrupts, avoid loop */
6829 ipr_cmd->ioa_cfg->allow_interrupts = 0;
6830 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6831 ipr_cmd->done = ipr_reset_ioa_job;
6832 return IPR_RC_JOB_RETURN;
6836 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6837 * @pdev: PCI device struct
6839 * Description: This routine is called to tell us that the PCI bus
6840 * is down. Can't do anything here, except put the device driver
6841 * into a holding pattern, waiting for the PCI bus to come back.
6843 static void ipr_pci_frozen(struct pci_dev *pdev)
6845 unsigned long flags = 0;
6846 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6848 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6849 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6854 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6855 * @pdev: PCI device struct
6857 * Description: This routine is called by the pci error recovery
6858 * code after the PCI slot has been reset, just before we
6859 * should resume normal operations.
6861 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6863 unsigned long flags = 0;
6864 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6866 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6867 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6869 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6870 return PCI_ERS_RESULT_RECOVERED;
6874 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6875 * @pdev: PCI device struct
6877 * Description: This routine is called when the PCI bus has
6878 * permanently failed.
6880 static void ipr_pci_perm_failure(struct pci_dev *pdev)
6882 unsigned long flags = 0;
6883 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6885 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6886 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6887 ioa_cfg->sdt_state = ABORT_DUMP;
6888 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6889 ioa_cfg->in_ioa_bringdown = 1;
6890 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6895 * ipr_pci_error_detected - Called when a PCI error is detected.
6896 * @pdev: PCI device struct
6897 * @state: PCI channel state
6899 * Description: Called when a PCI error is detected.
6902 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6904 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6905 pci_channel_state_t state)
6908 case pci_channel_io_frozen:
6909 ipr_pci_frozen(pdev);
6910 return PCI_ERS_RESULT_NEED_RESET;
6911 case pci_channel_io_perm_failure:
6912 ipr_pci_perm_failure(pdev);
6913 return PCI_ERS_RESULT_DISCONNECT;
6918 return PCI_ERS_RESULT_NEED_RESET;
6922 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6923 * @ioa_cfg: ioa cfg struct
6925 * Description: This is the second phase of adapter initialization.
6926 * This function takes care of initializing the adapter to the point
6927 * where it can accept new commands.
6930 * 0 on success / -EIO on failure
6932 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6935 unsigned long host_lock_flags = 0;
6938 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6939 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
/* Some adapters need a full hard reset before they will initialize. */
6940 if (ioa_cfg->needs_hard_reset) {
6941 ioa_cfg->needs_hard_reset = 0;
6942 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* NOTE(review): elided else -- otherwise start the job at the enable step. */
6944 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
/* Sleep until the reset job completes (host lock dropped while waiting). */
6947 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6948 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6949 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* NOTE(review): the -EIO assignments inside these branches are on elided
 * lines; a dead or unsupported adapter fails the probe. */
6951 if (ioa_cfg->ioa_is_dead) {
6953 } else if (ipr_invalid_adapter(ioa_cfg)) {
6957 dev_err(&ioa_cfg->pdev->dev,
6958 "Adapter not supported in this hardware configuration.\n");
6961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6968 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6969 * @ioa_cfg: ioa config struct
6974 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6978 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6979 if (ioa_cfg->ipr_cmnd_list[i])
6980 pci_pool_free(ioa_cfg->ipr_cmd_pool,
6981 ioa_cfg->ipr_cmnd_list[i],
6982 ioa_cfg->ipr_cmnd_list_dma[i]);
6984 ioa_cfg->ipr_cmnd_list[i] = NULL;
6987 if (ioa_cfg->ipr_cmd_pool)
6988 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6990 ioa_cfg->ipr_cmd_pool = NULL;
6994 * ipr_free_mem - Frees memory allocated for an adapter
6995 * @ioa_cfg: ioa cfg struct
/* Releases everything ipr_alloc_mem() set up. */
7000 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7004 kfree(ioa_cfg->res_entries);
7005 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7006 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7007 ipr_free_cmd_blks(ioa_cfg);
7008 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7009 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7010 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7012 ioa_cfg->cfg_table_dma);
/* Each HCAM host request/response buffer was allocated individually. */
7014 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7015 pci_free_consistent(ioa_cfg->pdev,
7016 sizeof(struct ipr_hostrcb),
7017 ioa_cfg->hostrcb[i],
7018 ioa_cfg->hostrcb_dma[i]);
7021 ipr_free_dump(ioa_cfg);
7022 kfree(ioa_cfg->trace);
7026 * ipr_free_all_resources - Free all allocated resources for an adapter.
7027 * @ipr_cmd: ipr command struct
7029 * This function frees all allocated resources for the
7030 * specified adapter.
7035 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7037 struct pci_dev *pdev = ioa_cfg->pdev;
/* Tear down in reverse order of acquisition: IRQ, MMIO mapping, BAR
 * regions, driver memory, SCSI host reference, then the PCI device. */
7040 free_irq(pdev->irq, ioa_cfg);
7041 iounmap(ioa_cfg->hdw_dma_regs);
7042 pci_release_regions(pdev);
7043 ipr_free_mem(ioa_cfg);
7044 scsi_host_put(ioa_cfg->host);
7045 pci_disable_device(pdev);
7050 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7051 * @ioa_cfg: ioa config struct
7054 * 0 on success / -ENOMEM on allocation failure
7056 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7058 struct ipr_cmnd *ipr_cmd;
7059 struct ipr_ioarcb *ioarcb;
7060 dma_addr_t dma_addr;
/* DMA pool of command blocks, 8-byte aligned. */
7063 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7064 sizeof(struct ipr_cmnd), 8, 0);
7066 if (!ioa_cfg->ipr_cmd_pool)
7069 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7070 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
/* NOTE(review): the allocation-failure check guarding this cleanup is
 * on an elided line. */
7073 ipr_free_cmd_blks(ioa_cfg);
7077 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7078 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7079 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
/* Pre-compute the IOARCB fields that never change for this block:
 * its own bus address and the offsets of its IOADL/IOASA/sense areas. */
7081 ioarcb = &ipr_cmd->ioarcb;
7082 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7083 ioarcb->host_response_handle = cpu_to_be32(i << 2);
7084 ioarcb->write_ioadl_addr =
7085 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
7086 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7087 ioarcb->ioasa_host_pci_addr =
7088 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7089 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7090 ipr_cmd->cmd_index = i;
7091 ipr_cmd->ioa_cfg = ioa_cfg;
7092 ipr_cmd->sense_buffer_dma = dma_addr +
7093 offsetof(struct ipr_cmnd, sense_buffer);
/* Every block starts life on the free queue. */
7095 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7102 * ipr_alloc_mem - Allocate memory for an adapter
7103 * @ioa_cfg: ioa config struct
7106 * 0 on success / non-zero for error
/* Allocates all per-adapter memory; unwinds in reverse on any failure. */
7108 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7110 struct pci_dev *pdev = ioa_cfg->pdev;
7111 int i, rc = -ENOMEM;
7114 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7115 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7117 if (!ioa_cfg->res_entries)
/* All resource entries start on the free queue. */
7120 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7121 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
/* DMA-able scratch area for VPD/inquiry/mode-page data. */
7123 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7124 sizeof(struct ipr_misc_cbs),
7125 &ioa_cfg->vpd_cbs_dma);
7127 if (!ioa_cfg->vpd_cbs)
7128 goto out_free_res_entries;
7130 if (ipr_alloc_cmd_blks(ioa_cfg))
7131 goto out_free_vpd_cbs;
/* Host RRQ: one u32 response slot per command block. */
7133 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7134 sizeof(u32) * IPR_NUM_CMD_BLKS,
7135 &ioa_cfg->host_rrq_dma);
7137 if (!ioa_cfg->host_rrq)
7138 goto out_ipr_free_cmd_blocks;
7140 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7141 sizeof(struct ipr_config_table),
7142 &ioa_cfg->cfg_table_dma);
7144 if (!ioa_cfg->cfg_table)
7145 goto out_free_host_rrq;
/* One DMA-able hostrcb per HCAM, threaded onto the free queue. */
7147 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7148 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7149 sizeof(struct ipr_hostrcb),
7150 &ioa_cfg->hostrcb_dma[i]);
7152 if (!ioa_cfg->hostrcb[i])
7153 goto out_free_hostrcb_dma;
/* Cache the bus address of the hcam payload inside each hostrcb. */
7155 ioa_cfg->hostrcb[i]->hostrcb_dma =
7156 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7157 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7158 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7161 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7162 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7164 if (!ioa_cfg->trace)
7165 goto out_free_hostrcb_dma;
/* Error unwind: free in reverse allocation order.  NOTE(review): the
 * loop header that walks back over the hostrcbs allocated so far, and
 * some labels, are on elided lines. */
7172 out_free_hostrcb_dma:
7174 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7175 ioa_cfg->hostrcb[i],
7176 ioa_cfg->hostrcb_dma[i]);
7178 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7179 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7181 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7182 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7183 out_ipr_free_cmd_blocks:
7184 ipr_free_cmd_blks(ioa_cfg);
7186 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7187 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7188 out_free_res_entries:
7189 kfree(ioa_cfg->res_entries);
7194 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7195 * @ioa_cfg: ioa config struct
7200 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7204 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7205 ioa_cfg->bus_attr[i].bus = i;
/* QAS (quick arbitration and selection) off by default. */
7206 ioa_cfg->bus_attr[i].qas_enabled = 0;
7207 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
/* Honor the ipr_max_speed module parameter only when it is a valid
 * index into the speed table; otherwise fall back to the U160 rate. */
7208 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7209 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7211 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7216 * ipr_init_ioa_cfg - Initialize IOA config struct
7217 * @ioa_cfg: ioa config struct
7218 * @host: scsi host struct
7219 * @pdev: PCI dev struct
7224 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7225 struct Scsi_Host *host, struct pci_dev *pdev)
7227 const struct ipr_interrupt_offsets *p;
7228 struct ipr_interrupts *t;
7231 ioa_cfg->host = host;
7232 ioa_cfg->pdev = pdev;
7233 ioa_cfg->log_level = ipr_log_level;
7234 ioa_cfg->doorbell = IPR_DOORBELL;
/* Fixed eye-catcher strings stamped into the config struct —
 * presumably so the regions are easy to locate in an adapter or
 * system dump; TODO confirm. */
7235 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7236 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7237 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7238 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7239 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7240 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7241 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7242 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
/* All command, HCAM and resource queues start out empty. */
7244 INIT_LIST_HEAD(&ioa_cfg->free_q);
7245 INIT_LIST_HEAD(&ioa_cfg->pending_q);
7246 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7247 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7248 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7249 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7250 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7251 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7252 ioa_cfg->sdt_state = INACTIVE;
/* Write-cache mode is a module-parameter policy decision. */
7253 if (ipr_enable_cache)
7254 ioa_cfg->cache_state = CACHE_ENABLED;
7256 ioa_cfg->cache_state = CACHE_DISABLED;
7258 ipr_initialize_bus_attr(ioa_cfg);
7260 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7261 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7262 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7263 host->unique_id = host->host_no;
7264 host->max_cmd_len = IPR_MAX_CDB_LEN;
7265 pci_set_drvdata(pdev, ioa_cfg);
/*
 * Translate the chip-specific register offsets into absolute addresses
 * within the ioremapped BAR (ioa_cfg->hdw_dma_regs) so the rest of the
 * driver can read/write registers without knowing the chip layout.
 */
7267 p = &ioa_cfg->chip_cfg->regs;
7269 base = ioa_cfg->hdw_dma_regs;
7271 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7272 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7273 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7274 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7275 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7276 t->ioarrin_reg = base + p->ioarrin_reg;
7277 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7278 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7279 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7283 * ipr_get_chip_cfg - Find adapter chip configuration
7284 * @dev_id: PCI device id struct
7287 * ptr to chip config on success / NULL on failure
7289 static const struct ipr_chip_cfg_t * __devinit
7290 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
/* Linear scan of the static ipr_chip table, matched on PCI
 * vendor/device pair. */
7294 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7295 if (ipr_chip[i].vendor == dev_id->vendor &&
7296 ipr_chip[i].device == dev_id->device)
7297 return ipr_chip[i].cfg;
7302 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7303 * @pdev: PCI device struct
7304 * @dev_id: PCI device id struct
7307 * 0 on success / non-zero on failure
7309 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7310 const struct pci_device_id *dev_id)
7312 struct ipr_ioa_cfg *ioa_cfg;
7313 struct Scsi_Host *host;
7314 unsigned long ipr_regs_pci;
7315 void __iomem *ipr_regs;
7316 int rc = PCIBIOS_SUCCESSFUL;
7317 volatile u32 mask, uproc;
7321 if ((rc = pci_enable_device(pdev))) {
7322 dev_err(&pdev->dev, "Cannot enable adapter\n");
7326 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg is carved out of the Scsi_Host's hostdata area. */
7328 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7331 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7336 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7337 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
/* SATA support rides on the same adapter via libata. */
7338 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7339 sata_port_info.flags, &ipr_sata_ops);
7341 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7343 if (!ioa_cfg->chip_cfg) {
7344 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7345 dev_id->vendor, dev_id->device);
7346 goto out_scsi_host_put;
/* Transition-to-operational timeout: explicit module parameter wins,
 * then the per-device long-timeout flag, then the default. */
7349 if (ipr_transop_timeout)
7350 ioa_cfg->transop_timeout = ipr_transop_timeout;
7351 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7352 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7354 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
/* Claim and map BAR 0, which holds the adapter register space. */
7356 ipr_regs_pci = pci_resource_start(pdev, 0);
7358 rc = pci_request_regions(pdev, IPR_NAME);
7361 "Couldn't register memory range of registers\n");
7362 goto out_scsi_host_put;
7365 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7369 "Couldn't map memory range of registers\n");
7371 goto out_release_regions;
7374 ioa_cfg->hdw_dma_regs = ipr_regs;
7375 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7376 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7378 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7380 pci_set_master(pdev);
/* Adapter does 32-bit DMA only. */
7382 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7384 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7388 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7389 ioa_cfg->chip_cfg->cache_line_size);
7391 if (rc != PCIBIOS_SUCCESSFUL) {
7392 dev_err(&pdev->dev, "Write of cache line size failed\n");
7397 /* Save away PCI config space for use following IOA reset */
7398 rc = pci_save_state(pdev);
7400 if (rc != PCIBIOS_SUCCESSFUL) {
7401 dev_err(&pdev->dev, "Failed to save PCI config space\n");
7406 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7409 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7412 rc = ipr_alloc_mem(ioa_cfg);
7415 "Couldn't allocate enough memory for device driver!\n");
7420 * If HRRQ updated interrupt is not masked, or reset alert is set,
7421 * the card is in an unknown state and needs a hard reset
7423 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7424 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7425 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7426 ioa_cfg->needs_hard_reset = 1;
/* Quiesce interrupts before the (shared) IRQ handler is registered. */
7428 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7429 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7432 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Publish the adapter on the driver-wide list. */
7437 spin_lock(&ipr_driver_lock);
7438 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7439 spin_unlock(&ipr_driver_lock);
/* Error unwind labels: release resources in reverse order of setup. */
7446 ipr_free_mem(ioa_cfg);
7449 out_release_regions:
7450 pci_release_regions(pdev);
7452 scsi_host_put(host);
7454 pci_disable_device(pdev);
7459 * ipr_scan_vsets - Scans for VSET devices
7460 * @ioa_cfg: ioa config struct
7462 * Description: Since the VSET resources do not follow SAM in that we can have
7463 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7468 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
/* Exhaustively probe every target/LUN combination on the VSET bus,
 * because sparse LUN numbering defeats the normal SCSI scan. */
7472 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7473 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
7474 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7478 * ipr_initiate_ioa_bringdown - Bring down an adapter
7479 * @ioa_cfg: ioa config struct
7480 * @shutdown_type: shutdown type
7482 * Description: This function will initiate bringing down the adapter.
7483 * This consists of issuing an IOA shutdown to the adapter
7484 * to flush the cache, and running BIST.
7485 * If the caller needs to wait on the completion of the reset,
7486 * the caller must sleep on the reset_wait_q.
7491 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7492 enum ipr_shutdown_type shutdown_type)
/* A dump in progress cannot complete across a bringdown — abort it. */
7495 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7496 ioa_cfg->sdt_state = ABORT_DUMP;
7497 ioa_cfg->reset_retries = 0;
/* Flag makes the reset path treat this as a bringdown, not a reload. */
7498 ioa_cfg->in_ioa_bringdown = 1;
7499 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7504 * __ipr_remove - Remove a single adapter
7505 * @pdev: pci device struct
7507 * Adapter hot plug remove entry point.
7512 static void __ipr_remove(struct pci_dev *pdev)
7514 unsigned long host_lock_flags = 0;
7515 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7518 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Wait out any in-flight reset/reload before starting the bringdown;
 * the lock is dropped around the sleep and retaken to re-check. */
7519 while(ioa_cfg->in_reset_reload) {
7520 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7521 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7522 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7525 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
/* Wait for the bringdown to complete, then drain the work queue
 * (ipr_worker_thread) before tearing anything down. */
7527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7528 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7529 flush_scheduled_work();
7530 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unpublish the adapter from the driver-wide list. */
7532 spin_lock(&ipr_driver_lock);
7533 list_del(&ioa_cfg->queue);
7534 spin_unlock(&ipr_driver_lock);
7536 if (ioa_cfg->sdt_state == ABORT_DUMP)
7537 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7540 ipr_free_all_resources(ioa_cfg);
7546 * ipr_remove - IOA hot plug remove entry point
7547 * @pdev: pci device struct
7549 * Adapter hot plug remove entry point.
7554 static void ipr_remove(struct pci_dev *pdev)
7556 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Tear down the sysfs trace/dump attributes and the SCSI host before
 * the low-level bringdown in __ipr_remove(). */
7560 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7562 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7564 scsi_remove_host(ioa_cfg->host);
7572 * ipr_probe - Adapter hot plug add entry point
7575 * 0 on success / non-zero on failure
7577 static int __devinit ipr_probe(struct pci_dev *pdev,
7578 const struct pci_device_id *dev_id)
7580 struct ipr_ioa_cfg *ioa_cfg;
/* Stage 1: allocate and initialize adapter state. */
7583 rc = ipr_probe_ioa(pdev, dev_id);
7588 ioa_cfg = pci_get_drvdata(pdev);
/* Stage 2: bring the adapter to an operational state. */
7589 rc = ipr_probe_ioa_part2(ioa_cfg);
7596 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* sysfs trace/dump attribute files; each failure path below unwinds
 * what was registered before it. */
7603 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7607 scsi_remove_host(ioa_cfg->host);
7612 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7616 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7618 scsi_remove_host(ioa_cfg->host);
/* Discover devices: normal scan, then the manual VSET scan (sparse
 * LUNs), then the IOA's own pseudo-device. */
7623 scsi_scan_host(ioa_cfg->host);
7624 ipr_scan_vsets(ioa_cfg);
7625 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7626 ioa_cfg->allow_ml_add_del = 1;
7627 ioa_cfg->host->max_channel = IPR_VSET_BUS;
/* Kick the worker thread to process anything queued during bring-up. */
7628 schedule_work(&ioa_cfg->work_q);
7633 * ipr_shutdown - Shutdown handler.
7634 * @pdev: pci device struct
7636 * This function is invoked upon system shutdown/reboot. It will issue
7637 * an adapter shutdown to the adapter to flush the write cache.
7642 static void ipr_shutdown(struct pci_dev *pdev)
7644 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7645 unsigned long lock_flags = 0;
7647 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Wait out any in-flight reset/reload first (same pattern as
 * __ipr_remove): drop the lock to sleep, retake it to re-check. */
7648 while(ioa_cfg->in_reset_reload) {
7649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7650 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7651 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7654 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
/* Block until the cache-flushing bringdown has fully completed. */
7655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7656 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/*
 * PCI IDs this driver binds to. Entries are matched on vendor/device
 * plus IBM subsystem IDs; the final field (driver_data) carries
 * per-device flags such as IPR_USE_LONG_TRANSOP_TIMEOUT, consumed in
 * ipr_probe_ioa() when choosing the transition-to-operational timeout.
 */
7659 static struct pci_device_id ipr_pci_table[] __devinitdata = {
7660 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7661 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7662 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7663 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7664 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7665 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7666 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7667 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7668 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7670 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7672 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7674 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7676 IPR_USE_LONG_TRANSOP_TIMEOUT },
7677 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7678 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7679 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7680 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7681 IPR_USE_LONG_TRANSOP_TIMEOUT },
7682 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7684 IPR_USE_LONG_TRANSOP_TIMEOUT },
7685 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7686 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7687 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7688 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7689 IPR_USE_LONG_TRANSOP_TIMEOUT},
7690 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7692 IPR_USE_LONG_TRANSOP_TIMEOUT },
7693 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7695 IPR_USE_LONG_TRANSOP_TIMEOUT },
7696 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7697 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7698 IPR_USE_LONG_TRANSOP_TIMEOUT },
7699 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7700 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7701 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7703 IPR_USE_LONG_TRANSOP_TIMEOUT },
7704 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7705 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7706 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7707 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7708 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7709 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7710 IPR_USE_LONG_TRANSOP_TIMEOUT },
7711 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7712 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7713 IPR_USE_LONG_TRANSOP_TIMEOUT },
7714 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7715 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7716 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* Export the table so module autoloading (modalias) can match devices. */
7719 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI AER recovery callbacks (detect / slot-reset hooks). */
7721 static struct pci_error_handlers ipr_err_handler = {
7722 .error_detected = ipr_pci_error_detected,
7723 .slot_reset = ipr_pci_slot_reset,
/* PCI driver registration: probe/remove/shutdown plus error handlers.
 * dynids.use_driver_data lets dynamically-added IDs supply driver_data
 * flags just like the static table. */
7726 static struct pci_driver ipr_driver = {
7728 .id_table = ipr_pci_table,
7730 .remove = ipr_remove,
7731 .shutdown = ipr_shutdown,
7732 .err_handler = &ipr_err_handler,
7733 .dynids.use_driver_data = 1
7737 * ipr_init - Module entry point
7740 * 0 on success / negative value on failure
7742 static int __init ipr_init(void)
7744 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7745 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
/* Per-adapter setup happens in ipr_probe() as devices are matched. */
7747 return pci_register_driver(&ipr_driver);
7751 * ipr_exit - Module unload
7753 * Module unload entry point.
7758 static void __exit ipr_exit(void)
/* Unregistering triggers ipr_remove() for every bound adapter. */
7760 pci_unregister_driver(&ipr_driver);
7763 module_init(ipr_init);
7764 module_exit(ipr_exit);