/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);
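
/*
 * Note (assumption based on usage elsewhere in this driver):
 * ipr_driver_lock guards the global ipr_ioa_head list of adapters;
 * per-adapter state is protected by each Scsi_Host's host_lock.
 */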
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
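
/*
 * Example (illustrative only): the parameters above are set at module
 * load time in the usual way, e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * which would allow U320 negotiation, raise driver log verbosity, and
 * shorten error recovery timeouts and retries.
 */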
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
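
/*
 * Note on the table above (field names assumed from their use later in
 * this file): each entry is { ioasc, log_ioasa, log_hcam, message }.
 * log_hcam is the minimum driver log_level at which the HCAM error
 * buffer for that IOASC is dumped (0 suppresses HCAM logging entirely);
 * log_ioasa similarly gates dumping of the IOASA for failed commands.
 */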
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
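
/*
 * In ipr_ses_table above, the second field is a per-byte compare mask
 * for the 16-character product ID: an 'X' means that byte must match
 * the product_id field exactly, while any other character (e.g. '*')
 * marks a "don't care" byte -- see ipr_find_ses_entry() below.  The
 * third field is the enclosure's maximum bus speed limit in MB/s.
 */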
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
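
/*
 * Note: the final readl() above is not used for its value; reading the
 * sense register back is the usual idiom for flushing the posted MMIO
 * writes out to the adapter before returning.
 */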
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:	ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, fail here too. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
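
/*
 * Worked example for the computation above: an enclosure limited to
 * 80 MB/s (max_bus_speed_limit = 80) on a wide (16-bit) bus yields
 * (80 * 10) / (16 / 8) = 400, i.e. 40 MHz in the 100KHz units used here.
 */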
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
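
/*
 * Note: the poll above backs off exponentially (the delay doubles each
 * iteration, starting at 1us), so the total busy-wait is bounded by
 * roughly twice max_delay.
 */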
1886 * ipr_get_ldump_data_section - Dump IOA memory
1887 * @ioa_cfg: ioa config struct
1888 * @start_addr: adapter address to dump
1889 * @dest: destination kernel buffer
1890 * @length_in_words: length to dump in 4 byte words
1893 * 0 on success / -EIO on failure
1895 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1897 __be32 *dest, u32 length_in_words)
1899 volatile u32 temp_pcii_reg;
1902 /* Write IOA interrupt reg starting LDUMP state */
1903 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1904 ioa_cfg->regs.set_uproc_interrupt_reg);
1906 /* Wait for IO debug acknowledge */
1907 if (ipr_wait_iodbg_ack(ioa_cfg,
1908 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1909 dev_err(&ioa_cfg->pdev->dev,
1910 "IOA dump long data transfer timeout\n");
1914 /* Signal LDUMP interlocked - clear IO debug ack */
1915 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1916 ioa_cfg->regs.clr_interrupt_reg);
1918 /* Write Mailbox with starting address */
1919 writel(start_addr, ioa_cfg->ioa_mailbox);
1921 /* Signal address valid - clear IOA Reset alert */
1922 writel(IPR_UPROCI_RESET_ALERT,
1923 ioa_cfg->regs.clr_uproc_interrupt_reg);
1925 for (i = 0; i < length_in_words; i++) {
1926 /* Wait for IO debug acknowledge */
1927 if (ipr_wait_iodbg_ack(ioa_cfg,
1928 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1929 dev_err(&ioa_cfg->pdev->dev,
1930 "IOA dump short data transfer timeout\n");
1934 /* Read data from mailbox and increment destination pointer */
1935 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1938 /* For all but the last word of data, signal data received */
1939 if (i < (length_in_words - 1)) {
1940 /* Signal dump data received - Clear IO debug Ack */
1941 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1942 ioa_cfg->regs.clr_interrupt_reg);
1946 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1947 writel(IPR_UPROCI_RESET_ALERT,
1948 ioa_cfg->regs.set_uproc_interrupt_reg);
1950 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1951 ioa_cfg->regs.clr_uproc_interrupt_reg);
1953 /* Signal dump data received - Clear IO debug Ack */
1954 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1955 ioa_cfg->regs.clr_interrupt_reg);
1957 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1958 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1959 temp_pcii_reg =
1960 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1962 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1963 return 0;
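/*
 * Summary of the LDUMP handshake implemented above, as read from the code:
 *  1. Set RESET_ALERT and IO_DEBUG_ALERT in the uproc interrupt register.
 *  2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it to interlock.
 *  3. Write the dump start address to the mailbox and clear RESET_ALERT.
 *  4. Per word: wait for the ACK, read the mailbox, and clear the ACK for
 *     every word except the last.
 *  5. Set RESET_ALERT, clear IO_DEBUG_ALERT and the final ACK, then poll
 *     until the IOA drops RESET_ALERT to signal LDUMP exit.
 */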
1972 #ifdef CONFIG_SCSI_IPR_DUMP
1974 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1975 * @ioa_cfg: ioa config struct
1976 * @pci_address: adapter address
1977 * @length: length of data to copy
1979 * Copy data from PCI adapter to kernel buffer.
1980 * Note: length MUST be a 4 byte multiple
1982 * 0 on success / other on failure
1984 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1985 unsigned long pci_address, u32 length)
1987 int bytes_copied = 0;
1988 int cur_len, rc, rem_len, rem_page_len;
1989 __be32 *page;
1990 unsigned long lock_flags = 0;
1991 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1993 while (bytes_copied < length &&
1994 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1995 if (ioa_dump->page_offset >= PAGE_SIZE ||
1996 ioa_dump->page_offset == 0) {
1997 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1999 if (!page) {
2000 ipr_trace;
2001 return bytes_copied;
2002 }
2004 ioa_dump->page_offset = 0;
2005 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2006 ioa_dump->next_page_index++;
2007 } else
2008 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2010 rem_len = length - bytes_copied;
2011 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2012 cur_len = min(rem_len, rem_page_len);
2014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2015 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2016 rc = -EIO;
2017 } else {
2018 rc = ipr_get_ldump_data_section(ioa_cfg,
2019 pci_address + bytes_copied,
2020 &page[ioa_dump->page_offset / 4],
2021 (cur_len / sizeof(u32)));
2022 }
2023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2025 if (!rc) {
2026 ioa_dump->page_offset += cur_len;
2027 bytes_copied += cur_len;
2028 } else {
2029 ipr_trace;
2030 break;
2031 }
2035 return bytes_copied;
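/*
 * Illustration, standalone and not part of the driver: ipr_sdt_copy()
 * above bounds each transfer by both the bytes left overall and the bytes
 * left in the current page. The same arithmetic with a fixed 4 KB page
 * and a made-up length:
 */
#include <stdio.h>

#define EX_PAGE_SIZE 4096u

int main(void)
{
	unsigned int length = 10000, bytes_copied = 0, page_offset = 0;

	while (bytes_copied < length) {
		unsigned int rem_len = length - bytes_copied;
		unsigned int rem_page_len = EX_PAGE_SIZE - page_offset;
		unsigned int cur_len =
			rem_len < rem_page_len ? rem_len : rem_page_len;

		printf("copy %u bytes at page offset %u\n",
		       cur_len, page_offset);
		page_offset = (page_offset + cur_len) % EX_PAGE_SIZE;
		bytes_copied += cur_len;
	}
	return 0;	/* copies 4096, 4096 and 1808 bytes */
}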
2039 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2040 * @hdr: dump entry header struct
2045 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2047 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2049 hdr->offset = sizeof(*hdr);
2050 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2054 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2055 * @ioa_cfg: ioa config struct
2056 * @driver_dump: driver dump struct
2061 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2062 struct ipr_driver_dump *driver_dump)
2064 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2066 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2067 driver_dump->ioa_type_entry.hdr.len =
2068 sizeof(struct ipr_dump_ioa_type_entry) -
2069 sizeof(struct ipr_dump_entry_header);
2070 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2071 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2072 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2073 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2074 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2075 ucode_vpd->minor_release[1];
2076 driver_dump->hdr.num_entries++;
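/*
 * Illustration, standalone: the fw_version word built above packs four VPD
 * bytes most-significant first. Sample values are made up:
 */
#include <stdio.h>

int main(void)
{
	unsigned char major_release = 0x02, card_type = 0x58;
	unsigned char minor_release[2] = { 0x43, 0x44 };
	unsigned int fw_version;

	fw_version = (major_release << 24) | (card_type << 16) |
		     (minor_release[0] << 8) | minor_release[1];

	printf("fw_version = %08X\n", fw_version);	/* 02584344 */
	return 0;
}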
2080 * ipr_dump_version_data - Fill in the driver version in the dump.
2081 * @ioa_cfg: ioa config struct
2082 * @driver_dump: driver dump struct
2087 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2088 struct ipr_driver_dump *driver_dump)
2090 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2091 driver_dump->version_entry.hdr.len =
2092 sizeof(struct ipr_dump_version_entry) -
2093 sizeof(struct ipr_dump_entry_header);
2094 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2095 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2096 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2097 driver_dump->hdr.num_entries++;
2101 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2102 * @ioa_cfg: ioa config struct
2103 * @driver_dump: driver dump struct
2108 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2109 struct ipr_driver_dump *driver_dump)
2111 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2112 driver_dump->trace_entry.hdr.len =
2113 sizeof(struct ipr_dump_trace_entry) -
2114 sizeof(struct ipr_dump_entry_header);
2115 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2116 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2117 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2118 driver_dump->hdr.num_entries++;
2122 * ipr_dump_location_data - Fill in the IOA location in the dump.
2123 * @ioa_cfg: ioa config struct
2124 * @driver_dump: driver dump struct
2129 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2130 struct ipr_driver_dump *driver_dump)
2132 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2133 driver_dump->location_entry.hdr.len =
2134 sizeof(struct ipr_dump_location_entry) -
2135 sizeof(struct ipr_dump_entry_header);
2136 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2137 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2138 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2139 driver_dump->hdr.num_entries++;
2143 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2144 * @ioa_cfg: ioa config struct
2145 * @dump: dump struct
2150 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2152 unsigned long start_addr, sdt_word;
2153 unsigned long lock_flags = 0;
2154 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2155 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2156 u32 num_entries, start_off, end_off;
2157 u32 bytes_to_copy, bytes_copied, rc;
2158 struct ipr_sdt *sdt;
2163 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2165 if (ioa_cfg->sdt_state != GET_DUMP) {
2166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2170 start_addr = readl(ioa_cfg->ioa_mailbox);
2172 if (!ipr_sdt_is_fmt2(start_addr)) {
2173 dev_err(&ioa_cfg->pdev->dev,
2174 "Invalid dump table format: %lx\n", start_addr);
2175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2179 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2181 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2183 /* Initialize the overall dump header */
2184 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2185 driver_dump->hdr.num_entries = 1;
2186 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2187 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2188 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2189 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2191 ipr_dump_version_data(ioa_cfg, driver_dump);
2192 ipr_dump_location_data(ioa_cfg, driver_dump);
2193 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2194 ipr_dump_trace_data(ioa_cfg, driver_dump);
2196 /* Update dump_header */
2197 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2199 /* IOA Dump entry */
2200 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2201 ioa_dump->format = IPR_SDT_FMT2;
2202 ioa_dump->hdr.len = 0;
2203 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2204 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2206 /* First entries in sdt are actually a list of dump addresses and
2207 lengths used to gather the real dump data. sdt points to the
2208 IOA-generated dump table; dump data will be extracted based
2209 on entries in this table. */
2210 sdt = &ioa_dump->sdt;
2212 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2213 sizeof(struct ipr_sdt) / sizeof(__be32));
2215 /* Smart Dump table is ready to use and the first entry is valid */
2216 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2217 dev_err(&ioa_cfg->pdev->dev,
2218 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2219 rc, be32_to_cpu(sdt->hdr.state));
2220 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2221 ioa_cfg->sdt_state = DUMP_OBTAINED;
2222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2226 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2228 if (num_entries > IPR_NUM_SDT_ENTRIES)
2229 num_entries = IPR_NUM_SDT_ENTRIES;
2231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2233 for (i = 0; i < num_entries; i++) {
2234 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2235 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2239 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2240 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2241 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2242 end_off = be32_to_cpu(sdt->entry[i].end_offset);
2244 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2245 bytes_to_copy = end_off - start_off;
2246 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2247 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2251 /* Copy data from adapter to driver buffers */
2252 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2255 ioa_dump->hdr.len += bytes_copied;
2257 if (bytes_copied != bytes_to_copy) {
2258 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2265 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2267 /* Update dump_header */
2268 driver_dump->hdr.len += ioa_dump->hdr.len;
2270 ioa_cfg->sdt_state = DUMP_OBTAINED;
2274 #else
2275 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2276 #endif
2279 * ipr_release_dump - Free adapter dump memory
2280 * @kref: kref struct
2285 static void ipr_release_dump(struct kref *kref)
2287 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2288 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2289 unsigned long lock_flags = 0;
2293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2294 ioa_cfg->dump = NULL;
2295 ioa_cfg->sdt_state = INACTIVE;
2296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2298 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2299 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2306 * ipr_worker_thread - Worker thread
2307 * @work: ioa config struct
2309 * Called at task level from a work thread. This function takes care
2310 * of adding and removing devices from the mid-layer as configuration
2311 * changes are detected by the adapter.
2316 static void ipr_worker_thread(struct work_struct *work)
2318 unsigned long lock_flags;
2319 struct ipr_resource_entry *res;
2320 struct scsi_device *sdev;
2321 struct ipr_dump *dump;
2322 struct ipr_ioa_cfg *ioa_cfg =
2323 container_of(work, struct ipr_ioa_cfg, work_q);
2324 u8 bus, target, lun;
2328 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2330 if (ioa_cfg->sdt_state == GET_DUMP) {
2331 dump = ioa_cfg->dump;
2333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2336 kref_get(&dump->kref);
2337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2338 ipr_get_ioa_dump(ioa_cfg, dump);
2339 kref_put(&dump->kref, ipr_release_dump);
2341 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2342 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2343 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2351 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2356 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2357 if (res->del_from_ml && res->sdev) {
2359 sdev = res->sdev;
2360 if (!scsi_device_get(sdev)) {
2361 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2363 scsi_remove_device(sdev);
2364 scsi_device_put(sdev);
2365 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2372 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2373 if (res->add_to_ml) {
2374 bus = res->cfgte.res_addr.bus;
2375 target = res->cfgte.res_addr.target;
2376 lun = res->cfgte.res_addr.lun;
2378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2379 scsi_add_device(ioa_cfg->host, bus, target, lun);
2380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2386 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2390 #ifdef CONFIG_SCSI_IPR_TRACE
2392 * ipr_read_trace - Dump the adapter trace
2393 * @kobj: kobject struct
2396 * @count: buffer size
2399 * number of bytes printed to buffer
2401 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2402 loff_t off, size_t count)
2404 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2405 struct Scsi_Host *shost = class_to_shost(cdev);
2406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2407 unsigned long lock_flags = 0;
2408 int size = IPR_TRACE_SIZE;
2409 char *src = (char *)ioa_cfg->trace;
2411 if (off > size)
2412 return 0;
2413 if (off + count > size) {
2414 size -= off;
2415 count = size;
2416 }
2418 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2419 memcpy(buf, &src[off], count);
2420 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2424 static struct bin_attribute ipr_trace_attr = {
2430 .read = ipr_read_trace,
2434 static const struct {
2435 enum ipr_cache_state state;
2436 char *name;
2437 } cache_state [] = {
2438 { CACHE_NONE, "none" },
2439 { CACHE_DISABLED, "disabled" },
2440 { CACHE_ENABLED, "enabled" }
2444 * ipr_show_write_caching - Show the write caching attribute
2445 * @class_dev: class device struct
2449 * number of bytes printed to buffer
2451 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2453 struct Scsi_Host *shost = class_to_shost(class_dev);
2454 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2455 unsigned long lock_flags = 0;
2458 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2459 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2460 if (cache_state[i].state == ioa_cfg->cache_state) {
2461 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2465 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2471 * ipr_store_write_caching - Enable/disable adapter write cache
2472 * @class_dev: class_device struct
2474 * @count: buffer size
2476 * This function will enable/disable adapter write cache.
2479 * count on success / other on failure
2481 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2482 const char *buf, size_t count)
2484 struct Scsi_Host *shost = class_to_shost(class_dev);
2485 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2486 unsigned long lock_flags = 0;
2487 enum ipr_cache_state new_state = CACHE_INVALID;
2490 if (!capable(CAP_SYS_ADMIN))
2491 return -EACCES;
2492 if (ioa_cfg->cache_state == CACHE_NONE)
2493 return -EINVAL;
2495 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2496 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2497 new_state = cache_state[i].state;
2502 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2506 if (ioa_cfg->cache_state == new_state) {
2507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511 ioa_cfg->cache_state = new_state;
2512 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2513 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2514 if (!ioa_cfg->in_reset_reload)
2515 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2517 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2522 static struct class_device_attribute ipr_ioa_cache_attr = {
2524 .name = "write_cache",
2525 .mode = S_IRUGO | S_IWUSR,
2527 .show = ipr_show_write_caching,
2528 .store = ipr_store_write_caching
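/*
 * Usage note (sysfs path assumed from the scsi_host class): the attribute
 * above appears as /sys/class/scsi_host/hostN/write_cache. Reading it
 * returns "none", "disabled" or "enabled"; writing "enabled" or "disabled"
 * changes the setting and triggers an adapter reset, e.g.:
 *
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 */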
2532 * ipr_show_fw_version - Show the firmware version
2533 * @class_dev: class device struct
2537 * number of bytes printed to buffer
2539 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2541 struct Scsi_Host *shost = class_to_shost(class_dev);
2542 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2543 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2544 unsigned long lock_flags = 0;
2547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2548 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2549 ucode_vpd->major_release, ucode_vpd->card_type,
2550 ucode_vpd->minor_release[0],
2551 ucode_vpd->minor_release[1]);
2552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556 static struct class_device_attribute ipr_fw_version_attr = {
2558 .name = "fw_version",
2561 .show = ipr_show_fw_version,
2565 * ipr_show_log_level - Show the adapter's error logging level
2566 * @class_dev: class device struct
2570 * number of bytes printed to buffer
2572 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2574 struct Scsi_Host *shost = class_to_shost(class_dev);
2575 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2576 unsigned long lock_flags = 0;
2579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2580 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2586 * ipr_store_log_level - Change the adapter's error logging level
2587 * @class_dev: class device struct
2591 * number of bytes printed to buffer
2593 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2594 const char *buf, size_t count)
2596 struct Scsi_Host *shost = class_to_shost(class_dev);
2597 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2598 unsigned long lock_flags = 0;
2600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2601 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2606 static struct class_device_attribute ipr_log_level_attr = {
2608 .name = "log_level",
2609 .mode = S_IRUGO | S_IWUSR,
2611 .show = ipr_show_log_level,
2612 .store = ipr_store_log_level
2616 * ipr_store_diagnostics - IOA Diagnostics interface
2617 * @class_dev: class_device struct
2619 * @count: buffer size
2621 * This function will reset the adapter and wait a reasonable
2622 * amount of time for any errors that the adapter might log.
2625 * count on success / other on failure
2627 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2628 const char *buf, size_t count)
2630 struct Scsi_Host *shost = class_to_shost(class_dev);
2631 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2632 unsigned long lock_flags = 0;
2635 if (!capable(CAP_SYS_ADMIN))
2636 return -EACCES;
2638 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2639 while(ioa_cfg->in_reset_reload) {
2640 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2641 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2642 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2645 ioa_cfg->errors_logged = 0;
2646 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2648 if (ioa_cfg->in_reset_reload) {
2649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2650 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2652 /* Wait for a second for any errors to be logged */
2655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2659 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2660 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2667 static struct class_device_attribute ipr_diagnostics_attr = {
2669 .name = "run_diagnostics",
2672 .store = ipr_store_diagnostics
2676 * ipr_show_adapter_state - Show the adapter's state
2677 * @class_dev: class device struct
2681 * number of bytes printed to buffer
2683 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2685 struct Scsi_Host *shost = class_to_shost(class_dev);
2686 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2687 unsigned long lock_flags = 0;
2690 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2691 if (ioa_cfg->ioa_is_dead)
2692 len = snprintf(buf, PAGE_SIZE, "offline\n");
2693 else
2694 len = snprintf(buf, PAGE_SIZE, "online\n");
2695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2700 * ipr_store_adapter_state - Change adapter state
2701 * @class_dev: class_device struct
2703 * @count: buffer size
2705 * This function will change the adapter's state.
2708 * count on success / other on failure
2710 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2711 const char *buf, size_t count)
2713 struct Scsi_Host *shost = class_to_shost(class_dev);
2714 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2715 unsigned long lock_flags;
2718 if (!capable(CAP_SYS_ADMIN))
2719 return -EACCES;
2721 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2722 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2723 ioa_cfg->ioa_is_dead = 0;
2724 ioa_cfg->reset_retries = 0;
2725 ioa_cfg->in_ioa_bringdown = 0;
2726 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2734 static struct class_device_attribute ipr_ioa_state_attr = {
2737 .mode = S_IRUGO | S_IWUSR,
2739 .show = ipr_show_adapter_state,
2740 .store = ipr_store_adapter_state
2744 * ipr_store_reset_adapter - Reset the adapter
2745 * @class_dev: class_device struct
2747 * @count: buffer size
2749 * This function will reset the adapter.
2752 * count on success / other on failure
2754 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2755 const char *buf, size_t count)
2757 struct Scsi_Host *shost = class_to_shost(class_dev);
2758 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2759 unsigned long lock_flags;
2762 if (!capable(CAP_SYS_ADMIN))
2763 return -EACCES;
2765 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2766 if (!ioa_cfg->in_reset_reload)
2767 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2769 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2774 static struct class_device_attribute ipr_ioa_reset_attr = {
2776 .name = "reset_host",
2779 .store = ipr_store_reset_adapter
2783 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2784 * @buf_len: buffer length
2786 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2787 * list to use for microcode download
2790 * pointer to sglist / NULL on failure
2792 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2794 int sg_size, order, bsize_elem, num_elem, i, j;
2795 struct ipr_sglist *sglist;
2796 struct scatterlist *scatterlist;
2797 struct page *page;
2799 /* Get the minimum size per scatter/gather element */
2800 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2802 /* Get the actual size per element */
2803 order = get_order(sg_size);
2805 /* Determine the actual number of bytes per element */
2806 bsize_elem = PAGE_SIZE * (1 << order);
2808 /* Determine the actual number of sg entries needed */
2809 if (buf_len % bsize_elem)
2810 num_elem = (buf_len / bsize_elem) + 1;
2812 num_elem = buf_len / bsize_elem;
2814 /* Allocate a scatter/gather list for the DMA */
2815 sglist = kzalloc(sizeof(struct ipr_sglist) +
2816 (sizeof(struct scatterlist) * (num_elem - 1)),
2819 if (sglist == NULL) {
2824 scatterlist = sglist->scatterlist;
2826 sglist->order = order;
2827 sglist->num_sg = num_elem;
2829 /* Allocate a bunch of sg elements */
2830 for (i = 0; i < num_elem; i++) {
2831 page = alloc_pages(GFP_KERNEL, order);
2835 /* Free up what we already allocated */
2836 for (j = i - 1; j >= 0; j--)
2837 __free_pages(scatterlist[j].page, order);
2842 scatterlist[i].page = page;
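/*
 * Illustration, standalone: the sizing logic above chooses a page order so
 * the whole image fits in at most IPR_MAX_SGLIST elements. get_order() is
 * modelled here for a 4 KB page, and IPR_MAX_SGLIST of 64 is an assumed
 * sample value:
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_MAX_SGLIST 64

static int ex_get_order(unsigned long size)
{
	int order = 0;
	unsigned long n = (size - 1) >> EX_PAGE_SHIFT;

	while (n) {	/* smallest order with (PAGE_SIZE << order) >= size */
		order++;
		n >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long buf_len = 1000000;	/* ~1 MB microcode image */
	unsigned long sg_size = buf_len / (EX_MAX_SGLIST - 1);
	int order = ex_get_order(sg_size);
	unsigned long bsize_elem = (1ul << EX_PAGE_SHIFT) << order;
	unsigned long num_elem = (buf_len + bsize_elem - 1) / bsize_elem;

	/* prints order=2 bsize_elem=16384 num_elem=62 */
	printf("order=%d bsize_elem=%lu num_elem=%lu\n",
	       order, bsize_elem, num_elem);
	return 0;
}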
2849 * ipr_free_ucode_buffer - Frees a microcode download buffer
2850 * @sglist: scatter/gather list pointer
2852 * Free a DMA'able ucode download buffer previously allocated with
2853 * ipr_alloc_ucode_buffer
2858 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2862 for (i = 0; i < sglist->num_sg; i++)
2863 __free_pages(sglist->scatterlist[i].page, sglist->order);
2869 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2870 * @sglist: scatter/gather list pointer
2871 * @buffer: buffer pointer
2872 * @len: buffer length
2874 * Copy a microcode image from a user buffer into a buffer allocated by
2875 * ipr_alloc_ucode_buffer
2878 * 0 on success / other on failure
2880 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2881 u8 *buffer, u32 len)
2883 int bsize_elem, i, result = 0;
2884 struct scatterlist *scatterlist;
2887 /* Determine the actual number of bytes per element */
2888 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2890 scatterlist = sglist->scatterlist;
2892 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2893 kaddr = kmap(scatterlist[i].page);
2894 memcpy(kaddr, buffer, bsize_elem);
2895 kunmap(scatterlist[i].page);
2897 scatterlist[i].length = bsize_elem;
2905 if (len % bsize_elem) {
2906 kaddr = kmap(scatterlist[i].page);
2907 memcpy(kaddr, buffer, len % bsize_elem);
2908 kunmap(scatterlist[i].page);
2910 scatterlist[i].length = len % bsize_elem;
2913 sglist->buffer_len = len;
2918 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2919 * @ipr_cmd: ipr command struct
2920 * @sglist: scatter/gather list
2922 * Builds a microcode download IOA data list (IOADL).
2925 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2926 struct ipr_sglist *sglist)
2928 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2929 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2930 struct scatterlist *scatterlist = sglist->scatterlist;
2933 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2934 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2935 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2936 ioarcb->write_ioadl_len =
2937 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2939 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2940 ioadl[i].flags_and_data_len =
2941 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2943 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2946 ioadl[i-1].flags_and_data_len |=
2947 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
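/*
 * Note, derived from the loop above: the final IOADL descriptor is marked
 * by OR-ing in IPR_IOADL_FLAGS_LAST, which is how the IOA recognizes the
 * end of the data descriptor list. ipr_build_ioadl() below uses the same
 * convention for regular I/O.
 */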
2951 * ipr_update_ioa_ucode - Update IOA's microcode
2952 * @ioa_cfg: ioa config struct
2953 * @sglist: scatter/gather list
2955 * Initiate an adapter reset to update the IOA's microcode
2958 * 0 on success / -EIO on failure
2960 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2961 struct ipr_sglist *sglist)
2963 unsigned long lock_flags;
2965 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2966 while(ioa_cfg->in_reset_reload) {
2967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2968 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2972 if (ioa_cfg->ucode_sglist) {
2973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2974 dev_err(&ioa_cfg->pdev->dev,
2975 "Microcode download already in progress\n");
2979 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2980 sglist->num_sg, DMA_TO_DEVICE);
2982 if (!sglist->num_dma_sg) {
2983 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2984 dev_err(&ioa_cfg->pdev->dev,
2985 "Failed to map microcode download buffer!\n");
2989 ioa_cfg->ucode_sglist = sglist;
2990 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2991 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2992 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2994 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2995 ioa_cfg->ucode_sglist = NULL;
2996 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001 * ipr_store_update_fw - Update the firmware on the adapter
3002 * @class_dev: class_device struct
3004 * @count: buffer size
3006 * This function will update the firmware on the adapter.
3009 * count on success / other on failure
3011 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3012 const char *buf, size_t count)
3014 struct Scsi_Host *shost = class_to_shost(class_dev);
3015 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3016 struct ipr_ucode_image_header *image_hdr;
3017 const struct firmware *fw_entry;
3018 struct ipr_sglist *sglist;
3019 char fname[100];
3020 u8 *src;
3021 int len, result, dnld_size;
3023 if (!capable(CAP_SYS_ADMIN))
3024 return -EACCES;
3026 len = snprintf(fname, 99, "%s", buf);
3027 fname[len-1] = '\0';
3029 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3030 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3034 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3036 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3037 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3038 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3039 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3040 release_firmware(fw_entry);
3044 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3045 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3046 sglist = ipr_alloc_ucode_buffer(dnld_size);
3049 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3050 release_firmware(fw_entry);
3054 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3057 dev_err(&ioa_cfg->pdev->dev,
3058 "Microcode buffer copy to DMA buffer failed\n");
3062 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3067 ipr_free_ucode_buffer(sglist);
3068 release_firmware(fw_entry);
3072 static struct class_device_attribute ipr_update_fw_attr = {
3074 .name = "update_fw",
3077 .store = ipr_store_update_fw
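/*
 * Usage note (paths assumed): writing a file name to the attribute above,
 * e.g. /sys/class/scsi_host/host0/update_fw, makes the driver fetch the
 * image through request_firmware() (typically from /lib/firmware), copy it
 * into the scatter/gather buffer and reset the adapter to apply it:
 *
 *   echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */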
3080 static struct class_device_attribute *ipr_ioa_attrs[] = {
3081 &ipr_fw_version_attr,
3082 &ipr_log_level_attr,
3083 &ipr_diagnostics_attr,
3084 &ipr_ioa_state_attr,
3085 &ipr_ioa_reset_attr,
3086 &ipr_update_fw_attr,
3087 &ipr_ioa_cache_attr,
3091 #ifdef CONFIG_SCSI_IPR_DUMP
3093 * ipr_read_dump - Dump the adapter
3094 * @kobj: kobject struct
3097 * @count: buffer size
3100 * number of bytes printed to buffer
3102 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3103 loff_t off, size_t count)
3105 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3106 struct Scsi_Host *shost = class_to_shost(cdev);
3107 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3108 struct ipr_dump *dump;
3109 unsigned long lock_flags = 0;
3114 if (!capable(CAP_SYS_ADMIN))
3115 return -EACCES;
3117 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3118 dump = ioa_cfg->dump;
3120 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3121 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3124 kref_get(&dump->kref);
3125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3127 if (off > dump->driver_dump.hdr.len) {
3128 kref_put(&dump->kref, ipr_release_dump);
3132 if (off + count > dump->driver_dump.hdr.len) {
3133 count = dump->driver_dump.hdr.len - off;
3137 if (count && off < sizeof(dump->driver_dump)) {
3138 if (off + count > sizeof(dump->driver_dump))
3139 len = sizeof(dump->driver_dump) - off;
3142 src = (u8 *)&dump->driver_dump + off;
3143 memcpy(buf, src, len);
3149 off -= sizeof(dump->driver_dump);
3151 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3152 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3153 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3156 src = (u8 *)&dump->ioa_dump + off;
3157 memcpy(buf, src, len);
3163 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3166 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3167 len = PAGE_ALIGN(off) - off;
3170 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3171 src += off & ~PAGE_MASK;
3172 memcpy(buf, src, len);
3178 kref_put(&dump->kref, ipr_release_dump);
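/*
 * Layout served by ipr_read_dump() above, as implied by its offset math:
 *
 *   [0, sizeof(driver_dump))                 driver dump header and entries
 *   [.., +offsetof(ipr_ioa_dump, ioa_data))  IOA dump entry header
 *   [.., +ioa_dump.hdr.len)                  IOA dump data, served one page
 *                                            at a time from ioa_data[]
 *
 * A read that straddles a boundary is satisfied piecewise, advancing buf,
 * off and count after each region.
 */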
3183 * ipr_alloc_dump - Prepare for adapter dump
3184 * @ioa_cfg: ioa config struct
3187 * 0 on success / other on failure
3189 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3191 struct ipr_dump *dump;
3192 unsigned long lock_flags = 0;
3194 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3197 ipr_err("Dump memory allocation failed\n");
3201 kref_init(&dump->kref);
3202 dump->ioa_cfg = ioa_cfg;
3204 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3206 if (INACTIVE != ioa_cfg->sdt_state) {
3207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3212 ioa_cfg->dump = dump;
3213 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3214 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3215 ioa_cfg->dump_taken = 1;
3216 schedule_work(&ioa_cfg->work_q);
3218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3224 * ipr_free_dump - Free adapter dump memory
3225 * @ioa_cfg: ioa config struct
3228 * 0 on success / other on failure
3230 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3232 struct ipr_dump *dump;
3233 unsigned long lock_flags = 0;
3237 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3238 dump = ioa_cfg->dump;
3240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3244 ioa_cfg->dump = NULL;
3245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3247 kref_put(&dump->kref, ipr_release_dump);
3254 * ipr_write_dump - Setup dump state of adapter
3255 * @kobj: kobject struct
3258 * @count: buffer size
3261 * number of bytes written on success / other on failure
3263 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3264 loff_t off, size_t count)
3266 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3267 struct Scsi_Host *shost = class_to_shost(cdev);
3268 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3271 if (!capable(CAP_SYS_ADMIN))
3272 return -EACCES;
3274 if (buf[0] == '1')
3275 rc = ipr_alloc_dump(ioa_cfg);
3276 else if (buf[0] == '0')
3277 rc = ipr_free_dump(ioa_cfg);
3287 static struct bin_attribute ipr_dump_attr = {
3290 .mode = S_IRUSR | S_IWUSR,
3293 .read = ipr_read_dump,
3294 .write = ipr_write_dump
3295 };
3296 #else
3297 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3298 #endif
3301 * ipr_change_queue_depth - Change the device's queue depth
3302 * @sdev: scsi device struct
3303 * @qdepth: depth to set
3308 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3310 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3311 struct ipr_resource_entry *res;
3312 unsigned long lock_flags = 0;
3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3315 res = (struct ipr_resource_entry *)sdev->hostdata;
3317 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3318 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3321 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3322 return sdev->queue_depth;
3326 * ipr_change_queue_type - Change the device's queue type
3327 * @sdev: scsi device struct
3328 * @tag_type: type of tags to use
3331 * actual queue type set
3333 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3335 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3336 struct ipr_resource_entry *res;
3337 unsigned long lock_flags = 0;
3339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340 res = (struct ipr_resource_entry *)sdev->hostdata;
3343 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3345 * We don't bother quiescing the device here since the
3346 * adapter firmware does it for us.
3348 scsi_set_tag_type(sdev, tag_type);
3350 if (tag_type)
3351 scsi_activate_tcq(sdev, sdev->queue_depth);
3352 else
3353 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3359 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3364 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3365 * @dev: device struct
3369 * number of bytes printed to buffer
3371 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3373 struct scsi_device *sdev = to_scsi_device(dev);
3374 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3375 struct ipr_resource_entry *res;
3376 unsigned long lock_flags = 0;
3377 ssize_t len = -ENXIO;
3379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3380 res = (struct ipr_resource_entry *)sdev->hostdata;
3382 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3383 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3387 static struct device_attribute ipr_adapter_handle_attr = {
3389 .name = "adapter_handle",
3392 .show = ipr_show_adapter_handle
3395 static struct device_attribute *ipr_dev_attrs[] = {
3396 &ipr_adapter_handle_attr,
3401 * ipr_biosparam - Return the HSC mapping
3402 * @sdev: scsi device struct
3403 * @block_device: block device pointer
3404 * @capacity: capacity of the device
3405 * @parm: Array containing returned HSC values.
3407 * This function generates the HSC parms that fdisk uses.
3408 * We want to make sure we return something that places partitions
3409 * on 4k boundaries for best performance with the IOA.
3414 static int ipr_biosparam(struct scsi_device *sdev,
3415 struct block_device *block_device,
3416 sector_t capacity, int *parm)
3421 heads = 128;
3422 sectors = 32;
3424 cylinders = capacity;
3425 sector_div(cylinders, (128 * 32));
3428 parm[0] = heads;
3429 parm[1] = sectors;
3430 parm[2] = cylinders;
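/*
 * Illustration, standalone: with the fixed 128-head, 32-sector geometry
 * above, one cylinder is 128 * 32 = 4096 sectors (2 MB of 512-byte
 * sectors), so cylinder-aligned partitions stay 4 KB aligned. The sample
 * capacity is made up:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 143374000ull;	/* total 512-byte sectors */
	unsigned int heads = 128, sectors = 32;
	unsigned long long cylinders = capacity / (heads * sectors);

	printf("H=%u S=%u C=%llu\n", heads, sectors, cylinders);
	return 0;
}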
3436 * ipr_find_starget - Find target based on bus/target.
3437 * @starget: scsi target struct
3440 * resource entry pointer if found / NULL if not found
3442 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3444 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3445 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3446 struct ipr_resource_entry *res;
3448 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3449 if ((res->cfgte.res_addr.bus == starget->channel) &&
3450 (res->cfgte.res_addr.target == starget->id) &&
3451 (res->cfgte.res_addr.lun == 0)) {
3459 static struct ata_port_info sata_port_info;
3462 * ipr_target_alloc - Prepare for commands to a SCSI target
3463 * @starget: scsi target struct
3465 * If the device is a SATA device, this function allocates an
3466 * ATA port with libata, else it does nothing.
3469 * 0 on success / non-0 on failure
3471 static int ipr_target_alloc(struct scsi_target *starget)
3473 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3474 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3475 struct ipr_sata_port *sata_port;
3476 struct ata_port *ap;
3477 struct ipr_resource_entry *res;
3478 unsigned long lock_flags;
3480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3481 res = ipr_find_starget(starget);
3482 starget->hostdata = NULL;
3484 if (res && ipr_is_gata(res)) {
3485 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3486 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3490 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493 sata_port->ioa_cfg = ioa_cfg;
3495 sata_port->res = res;
3497 res->sata_port = sata_port;
3498 ap->private_data = sata_port;
3499 starget->hostdata = sata_port;
3505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3511 * ipr_target_destroy - Destroy a SCSI target
3512 * @starget: scsi target struct
3514 * If the device was a SATA device, this function frees the libata
3515 * ATA port, else it does nothing.
3518 static void ipr_target_destroy(struct scsi_target *starget)
3520 struct ipr_sata_port *sata_port = starget->hostdata;
3523 starget->hostdata = NULL;
3524 ata_sas_port_destroy(sata_port->ap);
3530 * ipr_find_sdev - Find device based on bus/target/lun.
3531 * @sdev: scsi device struct
3534 * resource entry pointer if found / NULL if not found
3536 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3538 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3539 struct ipr_resource_entry *res;
3541 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3542 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3543 (res->cfgte.res_addr.target == sdev->id) &&
3544 (res->cfgte.res_addr.lun == sdev->lun))
3552 * ipr_slave_destroy - Unconfigure a SCSI device
3553 * @sdev: scsi device struct
3558 static void ipr_slave_destroy(struct scsi_device *sdev)
3560 struct ipr_resource_entry *res;
3561 struct ipr_ioa_cfg *ioa_cfg;
3562 unsigned long lock_flags = 0;
3564 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3566 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3567 res = (struct ipr_resource_entry *) sdev->hostdata;
3570 ata_port_disable(res->sata_port->ap);
3571 sdev->hostdata = NULL;
3573 res->sata_port = NULL;
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3579 * ipr_slave_configure - Configure a SCSI device
3580 * @sdev: scsi device struct
3582 * This function configures the specified scsi device.
3587 static int ipr_slave_configure(struct scsi_device *sdev)
3589 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3590 struct ipr_resource_entry *res;
3591 unsigned long lock_flags = 0;
3593 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3594 res = sdev->hostdata;
3596 if (ipr_is_af_dasd_device(res))
3597 sdev->type = TYPE_RAID;
3598 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3599 sdev->scsi_level = 4;
3600 sdev->no_uld_attach = 1;
3602 if (ipr_is_vset_device(res)) {
3603 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3604 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3606 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3607 sdev->allow_restart = 1;
3608 if (ipr_is_gata(res) && res->sata_port) {
3609 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3610 ata_sas_slave_configure(sdev, res->sata_port->ap);
3611 } else
3612 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3620 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3621 * @sdev: scsi device struct
3623 * This function initializes an ATA port so that future commands
3624 * sent through queuecommand will work.
3629 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3631 struct ipr_sata_port *sata_port = NULL;
3635 if (sdev->sdev_target)
3636 sata_port = sdev->sdev_target->hostdata;
3637 if (sata_port)
3638 rc = ata_sas_port_init(sata_port->ap);
3639 if (rc)
3640 ipr_slave_destroy(sdev);
3647 * ipr_slave_alloc - Prepare for commands to a device.
3648 * @sdev: scsi device struct
3650 * This function saves a pointer to the resource entry
3651 * in the scsi device struct if the device exists. We
3652 * can then use this pointer in ipr_queuecommand when
3653 * handling new commands.
3656 * 0 on success / -ENXIO if device does not exist
3658 static int ipr_slave_alloc(struct scsi_device *sdev)
3660 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3661 struct ipr_resource_entry *res;
3662 unsigned long lock_flags;
3665 sdev->hostdata = NULL;
3667 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3669 res = ipr_find_sdev(sdev);
3674 sdev->hostdata = res;
3675 if (!ipr_is_naca_model(res))
3676 res->needs_sync_complete = 1;
3678 if (ipr_is_gata(res)) {
3679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 return ipr_ata_slave_alloc(sdev);
3684 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3690 * ipr_eh_host_reset - Reset the host adapter
3691 * @scsi_cmd: scsi command struct
3696 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3698 struct ipr_ioa_cfg *ioa_cfg;
3702 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3704 dev_err(&ioa_cfg->pdev->dev,
3705 "Adapter being reset as a result of error recovery.\n");
3707 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3708 ioa_cfg->sdt_state = GET_DUMP;
3710 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3716 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3720 spin_lock_irq(cmd->device->host->host_lock);
3721 rc = __ipr_eh_host_reset(cmd);
3722 spin_unlock_irq(cmd->device->host->host_lock);
3728 * ipr_device_reset - Reset the device
3729 * @ioa_cfg: ioa config struct
3730 * @res: resource entry struct
3732 * This function issues a device reset to the affected device.
3733 * If the device is a SCSI device, a LUN reset will be sent
3734 * to the device first. If that does not work, a target reset
3735 * will be sent. If the device is a SATA device, a PHY reset will
3736 * be sent.
3739 * 0 on success / non-zero on failure
3741 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3742 struct ipr_resource_entry *res)
3744 struct ipr_cmnd *ipr_cmd;
3745 struct ipr_ioarcb *ioarcb;
3746 struct ipr_cmd_pkt *cmd_pkt;
3747 struct ipr_ioarcb_ata_regs *regs;
3751 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3752 ioarcb = &ipr_cmd->ioarcb;
3753 cmd_pkt = &ioarcb->cmd_pkt;
3754 regs = &ioarcb->add_data.u.regs;
3756 ioarcb->res_handle = res->cfgte.res_handle;
3757 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3758 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3759 if (ipr_is_gata(res)) {
3760 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3761 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3762 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3765 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3766 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3767 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3768 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3769 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3770 sizeof(struct ipr_ioasa_gata));
3773 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3777 * ipr_sata_reset - Reset the SATA port
3778 * @ap: SATA port to reset
3779 * @classes: class of the attached device
3781 * This function issues a SATA phy reset to the affected ATA port.
3784 * 0 on success / non-zero on failure
3786 static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3787 unsigned long deadline)
3789 struct ipr_sata_port *sata_port = ap->private_data;
3790 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3791 struct ipr_resource_entry *res;
3792 unsigned long lock_flags = 0;
3796 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3797 while(ioa_cfg->in_reset_reload) {
3798 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3799 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3800 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3803 res = sata_port->res;
3805 rc = ipr_device_reset(ioa_cfg, res);
3806 switch(res->cfgte.proto) {
3807 case IPR_PROTO_SATA:
3808 case IPR_PROTO_SAS_STP:
3809 *classes = ATA_DEV_ATA;
3811 case IPR_PROTO_SATA_ATAPI:
3812 case IPR_PROTO_SAS_STP_ATAPI:
3813 *classes = ATA_DEV_ATAPI;
3816 *classes = ATA_DEV_UNKNOWN;
3821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3827 * ipr_eh_dev_reset - Reset the device
3828 * @scsi_cmd: scsi command struct
3830 * This function issues a device reset to the affected device.
3831 * A LUN reset will be sent to the device first. If that does
3832 * not work, a target reset will be sent.
3837 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3839 struct ipr_cmnd *ipr_cmd;
3840 struct ipr_ioa_cfg *ioa_cfg;
3841 struct ipr_resource_entry *res;
3842 struct ata_port *ap;
3846 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3847 res = scsi_cmd->device->hostdata;
3853 * If we are currently going through reset/reload, return failed. This will force the
3854 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3855 * reset to complete
3856 */
3857 if (ioa_cfg->in_reset_reload)
3859 if (ioa_cfg->ioa_is_dead)
3862 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3863 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3864 if (ipr_cmd->scsi_cmd)
3865 ipr_cmd->done = ipr_scsi_eh_done;
3866 if (ipr_cmd->qc)
3867 ipr_cmd->done = ipr_sata_eh_done;
3868 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3869 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3870 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3875 res->resetting_device = 1;
3876 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3878 if (ipr_is_gata(res) && res->sata_port) {
3879 ap = res->sata_port->ap;
3880 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3881 ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3882 spin_lock_irq(scsi_cmd->device->host->host_lock);
3884 rc = ipr_device_reset(ioa_cfg, res);
3885 res->resetting_device = 0;
3888 return (rc ? FAILED : SUCCESS);
3891 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3895 spin_lock_irq(cmd->device->host->host_lock);
3896 rc = __ipr_eh_dev_reset(cmd);
3897 spin_unlock_irq(cmd->device->host->host_lock);
3903 * ipr_bus_reset_done - Op done function for bus reset.
3904 * @ipr_cmd: ipr command struct
3906 * This function is the op done function for a bus reset
3911 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3913 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3914 struct ipr_resource_entry *res;
3917 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3918 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3919 sizeof(res->cfgte.res_handle))) {
3920 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3926 * If abort has not completed, indicate the reset has, else call the
3927 * abort's done function to wake the sleeping eh thread
3929 if (ipr_cmd->sibling->sibling)
3930 ipr_cmd->sibling->sibling = NULL;
3931 else
3932 ipr_cmd->sibling->done(ipr_cmd->sibling);
3934 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3939 * ipr_abort_timeout - An abort task has timed out
3940 * @ipr_cmd: ipr command struct
3942 * This function handles when an abort task times out. If this
3943 * happens we issue a bus reset since we have resources tied
3944 * up that must be freed before returning to the midlayer.
3949 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3951 struct ipr_cmnd *reset_cmd;
3952 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3953 struct ipr_cmd_pkt *cmd_pkt;
3954 unsigned long lock_flags = 0;
3957 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3958 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3959 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3963 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3964 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3965 ipr_cmd->sibling = reset_cmd;
3966 reset_cmd->sibling = ipr_cmd;
3967 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3968 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3969 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3970 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3971 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3973 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3979 * ipr_cancel_op - Cancel specified op
3980 * @scsi_cmd: scsi command struct
3982 * This function cancels specified op.
3987 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3989 struct ipr_cmnd *ipr_cmd;
3990 struct ipr_ioa_cfg *ioa_cfg;
3991 struct ipr_resource_entry *res;
3992 struct ipr_cmd_pkt *cmd_pkt;
3997 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3998 res = scsi_cmd->device->hostdata;
4000 /* If we are currently going through reset/reload, return failed.
4001 * This will force the mid-layer to call ipr_eh_host_reset,
4002 * which will then go to sleep and wait for the reset to complete
4004 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4006 if (!res || !ipr_is_gscsi(res))
4009 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4010 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4011 ipr_cmd->done = ipr_scsi_eh_done;
4020 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4021 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4022 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4023 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4024 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4025 ipr_cmd->u.sdev = scsi_cmd->device;
4027 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4029 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4030 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4033 * If the abort task timed out and we sent a bus reset, we will get
4034 * one of the following responses to the abort
4036 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4041 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4042 if (!ipr_is_naca_model(res))
4043 res->needs_sync_complete = 1;
4046 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4050 * ipr_eh_abort - Abort a single op
4051 * @scsi_cmd: scsi command struct
4056 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4058 unsigned long flags;
4063 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4064 rc = ipr_cancel_op(scsi_cmd);
4065 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4072 * ipr_handle_other_interrupt - Handle "other" interrupts
4073 * @ioa_cfg: ioa config struct
4074 * @int_reg: interrupt register
4077 * IRQ_NONE / IRQ_HANDLED
4079 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4080 volatile u32 int_reg)
4082 irqreturn_t rc = IRQ_HANDLED;
4084 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4085 /* Mask the interrupt */
4086 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4088 /* Clear the interrupt */
4089 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4090 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4092 list_del(&ioa_cfg->reset_cmd->queue);
4093 del_timer(&ioa_cfg->reset_cmd->timer);
4094 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4095 } else {
4096 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4097 ioa_cfg->ioa_unit_checked = 1;
4098 else
4099 dev_err(&ioa_cfg->pdev->dev,
4100 "Permanent IOA failure. 0x%08X\n", int_reg);
4102 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4103 ioa_cfg->sdt_state = GET_DUMP;
4105 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4106 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4113 * ipr_isr - Interrupt service routine
4115 * @devp: pointer to ioa config struct
4118 * IRQ_NONE / IRQ_HANDLED
4120 static irqreturn_t ipr_isr(int irq, void *devp)
4122 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4123 unsigned long lock_flags = 0;
4124 volatile u32 int_reg, int_mask_reg;
4125 u32 ioasc;
4126 u16 cmd_index;
4127 struct ipr_cmnd *ipr_cmd;
4128 irqreturn_t rc = IRQ_NONE;
4130 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4132 /* If interrupts are disabled, ignore the interrupt */
4133 if (!ioa_cfg->allow_interrupts) {
4134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4138 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4139 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4141 /* If an interrupt on the adapter did not occur, ignore it */
4142 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4143 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4150 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4151 ioa_cfg->toggle_bit) {
4153 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4154 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4156 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4157 ioa_cfg->errors_logged++;
4158 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4160 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4161 ioa_cfg->sdt_state = GET_DUMP;
4163 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4164 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4168 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4170 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4172 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4174 list_del(&ipr_cmd->queue);
4175 del_timer(&ipr_cmd->timer);
4176 ipr_cmd->done(ipr_cmd);
4180 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4181 ioa_cfg->hrrq_curr++;
4182 } else {
4183 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4184 ioa_cfg->toggle_bit ^= 1u;
4188 if (ipr_cmd != NULL) {
4189 /* Clear the PCI interrupt */
4190 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4191 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4196 if (unlikely(rc == IRQ_NONE))
4197 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
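/*
 * Illustration, standalone: the host RRQ consumed above is a ring in which
 * the adapter flips a toggle bit on every wrap, so the driver can tell
 * fresh entries from stale ones without a count. Modelled with a 4-entry
 * ring and made-up response handles (bit 0 is the toggle bit):
 */
#include <stdio.h>

#define EX_RING_ENTRIES 4

int main(void)
{
	unsigned int ring[EX_RING_ENTRIES] = { 0x11, 0x21, 0x31, 0x40 };
	unsigned int i = 0, toggle = 1;

	/* Consume while the entry's toggle bit matches the expected value */
	while ((ring[i] & 1u) == toggle) {
		printf("handle %#x\n", ring[i] >> 1);
		if (++i == EX_RING_ENTRIES) {
			i = 0;
			toggle ^= 1u;	/* wrapped: expect the flipped bit */
		}
	}
	return 0;	/* stops at ring[3], whose toggle bit is stale */
}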
4204 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4205 * @ioa_cfg: ioa config struct
4206 * @ipr_cmd: ipr command struct
4209 * 0 on success / -1 on failure
4211 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4212 struct ipr_cmnd *ipr_cmd)
4215 struct scatterlist *sglist;
4217 u32 ioadl_flags = 0;
4218 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4219 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4220 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4222 length = scsi_cmd->request_bufflen;
4224 if (length == 0)
4225 return 0;
4227 if (scsi_cmd->use_sg) {
4228 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4229 scsi_cmd->request_buffer,
4231 scsi_cmd->sc_data_direction);
4233 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4234 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4235 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4236 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4237 ioarcb->write_ioadl_len =
4238 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4239 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4240 ioadl_flags = IPR_IOADL_FLAGS_READ;
4241 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4242 ioarcb->read_ioadl_len =
4243 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4244 }
4246 sglist = scsi_cmd->request_buffer;
4248 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4249 ioadl = ioarcb->add_data.u.ioadl;
4250 ioarcb->write_ioadl_addr =
4251 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4252 offsetof(struct ipr_ioarcb, add_data));
4253 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4254 }
4256 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4257 ioadl[i].flags_and_data_len =
4258 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4259 ioadl[i].address =
4260 cpu_to_be32(sg_dma_address(&sglist[i]));
4261 }
4263 if (likely(ipr_cmd->dma_use_sg)) {
4264 ioadl[i-1].flags_and_data_len |=
4265 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4266 return 0;
4267 } else
4268 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4269 } else {
4270 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4271 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4272 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4273 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4274 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4275 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4276 ioadl_flags = IPR_IOADL_FLAGS_READ;
4277 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4278 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4279 }
4281 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4282 scsi_cmd->request_buffer, length,
4283 scsi_cmd->sc_data_direction);
4285 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4286 ioadl = ioarcb->add_data.u.ioadl;
4287 ioarcb->write_ioadl_addr =
4288 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4289 offsetof(struct ipr_ioarcb, add_data));
4290 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4291 ipr_cmd->dma_use_sg = 1;
4292 ioadl[0].flags_and_data_len =
4293 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4294 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4295 return 0;
4296 } else
4297 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4298 }
4300 return -1;
4301 }
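/*
 * Illustrative sketch, not driver code: an IOADL is an array of
 * (flags|length, bus address) descriptors, one per DMA segment, with
 * IPR_IOADL_FLAGS_LAST OR'd into the final element so the adapter knows
 * where the list ends. A minimal model of that packing; the ex_* names
 * are hypothetical.
 */
struct ex_ioadl_desc {
	__be32 flags_and_data_len;
	__be32 address;
};

static void ex_fill_ioadl(struct ex_ioadl_desc *ioadl, u32 flags,
			  const dma_addr_t *addrs, const u32 *lens, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ioadl[i].flags_and_data_len = cpu_to_be32(flags | lens[i]);
		ioadl[i].address = cpu_to_be32(addrs[i]);
	}

	if (n)	/* mark the final descriptor */
		ioadl[n - 1].flags_and_data_len |=
			cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}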
4304 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4305 * @scsi_cmd: scsi command struct
4310 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4312 u8 tag[2];
4313 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4315 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4316 switch (tag[0]) {
4317 case MSG_SIMPLE_TAG:
4318 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4319 break;
4320 case MSG_HEAD_TAG:
4321 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4322 break;
4323 case MSG_ORDERED_TAG:
4324 rc = IPR_FLAGS_LO_ORDERED_TASK;
4325 break;
4326 }
4327 }
4329 return rc;
4333 * ipr_erp_done - Process completion of ERP for a device
4334 * @ipr_cmd: ipr command struct
4336 * This function copies the sense buffer into the scsi_cmd
4337 * struct and pushes the scsi_done function.
4342 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4344 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4345 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4346 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4347 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4349 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4350 scsi_cmd->result |= (DID_ERROR << 16);
4351 scmd_printk(KERN_ERR, scsi_cmd,
4352 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4353 } else {
4354 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4355 SCSI_SENSE_BUFFERSIZE);
4356 }
4358 if (res) {
4359 if (!ipr_is_naca_model(res))
4360 res->needs_sync_complete = 1;
4361 res->in_erp = 0;
4362 }
4363 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4364 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4365 scsi_cmd->scsi_done(scsi_cmd);
4369 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4370 * @ipr_cmd: ipr command struct
4375 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4377 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4378 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4379 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4381 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4382 ioarcb->write_data_transfer_length = 0;
4383 ioarcb->read_data_transfer_length = 0;
4384 ioarcb->write_ioadl_len = 0;
4385 ioarcb->read_ioadl_len = 0;
4386 ioasa->ioasc = 0;
4387 ioasa->residual_data_len = 0;
4388 ioarcb->write_ioadl_addr =
4389 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4390 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4394 * ipr_erp_request_sense - Send request sense to a device
4395 * @ipr_cmd: ipr command struct
4397 * This function sends a request sense to a device as a result
4398 * of a check condition.
4403 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4405 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4406 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4408 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4409 ipr_erp_done(ipr_cmd);
4410 return;
4411 }
4413 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4415 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4416 cmd_pkt->cdb[0] = REQUEST_SENSE;
4417 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4418 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4419 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4420 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4422 ipr_cmd->ioadl[0].flags_and_data_len =
4423 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4424 ipr_cmd->ioadl[0].address =
4425 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4427 ipr_cmd->ioarcb.read_ioadl_len =
4428 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4429 ipr_cmd->ioarcb.read_data_transfer_length =
4430 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4432 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4433 IPR_REQUEST_SENSE_TIMEOUT * 2);
4437 * ipr_erp_cancel_all - Send cancel all to a device
4438 * @ipr_cmd: ipr command struct
4440 * This function sends a cancel all to a device to clear the
4441 * queue. If we are running TCQ on the device, QERR is set to 1,
4442 * which means all outstanding ops have been dropped on the floor.
4443 * Cancel all will return them to us.
4448 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4450 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4451 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4452 struct ipr_cmd_pkt *cmd_pkt;
4456 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4458 if (!scsi_get_tag_type(scsi_cmd->device)) {
4459 ipr_erp_request_sense(ipr_cmd);
4460 return;
4461 }
4463 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4464 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4465 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4467 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4468 IPR_CANCEL_ALL_TIMEOUT);
4469 }
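/*
 * Illustrative sketch, not driver code: the ERP stages above are chained
 * through completion callbacks -- cancel-all completes into
 * ipr_erp_request_sense, which completes into ipr_erp_done. The shape of
 * that pattern, with hypothetical ex_* names:
 */
struct ex_op {
	void (*done)(struct ex_op *);	/* invoked by the ISR on completion */
};

static void ex_erp_done(struct ex_op *op)
{
	/* copy sense data back and complete the original request */
}

static void ex_erp_request_sense(struct ex_op *op)
{
	op->done = ex_erp_done;			/* final stage */
	/* build and issue REQUEST SENSE; the ISR calls op->done(op) later */
}

static void ex_erp_cancel_all(struct ex_op *op)
{
	op->done = ex_erp_request_sense;	/* next stage in the chain */
	/* build and issue CANCEL ALL REQUESTS */
}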
4472 * ipr_dump_ioasa - Dump contents of IOASA
4473 * @ioa_cfg: ioa config struct
4474 * @ipr_cmd: ipr command struct
4475 * @res: resource entry struct
4477 * This function is invoked by the interrupt handler when ops
4478 * fail. It will log the IOASA if appropriate. Only called
4479 * for GPDD ops.
4484 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4485 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4487 int i;
4488 u16 data_len;
4489 u32 ioasc, fd_ioasc;
4490 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4491 __be32 *ioasa_data = (__be32 *)ioasa;
4492 int error_index;
4494 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4495 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4500 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4501 return;
4503 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4504 error_index = ipr_get_error(fd_ioasc);
4505 else
4506 error_index = ipr_get_error(ioasc);
4508 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4509 /* Don't log an error if the IOA already logged one */
4510 if (ioasa->ilid != 0)
4511 return;
4513 if (!ipr_is_gscsi(res))
4514 return;
4516 if (ipr_error_table[error_index].log_ioasa == 0)
4517 return;
4518 }
4520 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4522 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4523 data_len = sizeof(struct ipr_ioasa);
4524 else
4525 data_len = be16_to_cpu(ioasa->ret_stat_len);
4527 ipr_err("IOASA Dump:\n");
4529 for (i = 0; i < data_len / 4; i += 4) {
4530 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4531 be32_to_cpu(ioasa_data[i]),
4532 be32_to_cpu(ioasa_data[i+1]),
4533 be32_to_cpu(ioasa_data[i+2]),
4534 be32_to_cpu(ioasa_data[i+3]));
4535 }
4536 }
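/*
 * Illustrative sketch, not driver code: the dump loop above prints the
 * IOASA as rows of four 32-bit big-endian words preceded by the byte
 * offset. The same walk in isolation (the ex_* name is hypothetical;
 * i counts words, so the printed offset is i * 4 bytes):
 */
static void ex_dump_be32_words(const __be32 *data, int data_len)
{
	int i;

	for (i = 0; i < data_len / 4; i += 4)
		printk(KERN_ERR "%08X: %08X %08X %08X %08X\n", i * 4,
		       be32_to_cpu(data[i]), be32_to_cpu(data[i + 1]),
		       be32_to_cpu(data[i + 2]), be32_to_cpu(data[i + 3]));
}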
4539 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4541 * @ipr_cmd: ipr command struct
4546 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4548 u32 failing_lba;
4549 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4550 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4551 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4552 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4554 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4556 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4557 return;
4559 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4561 if (ipr_is_vset_device(res) &&
4562 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4563 ioasa->u.vset.failing_lba_hi != 0) {
4564 sense_buf[0] = 0x72;
4565 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4566 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4567 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4571 sense_buf[9] = 0x0A;
4572 sense_buf[10] = 0x80;
4574 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4576 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4577 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4578 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4579 sense_buf[15] = failing_lba & 0x000000ff;
4581 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4583 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4584 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4585 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4586 sense_buf[19] = failing_lba & 0x000000ff;
4587 } else {
4588 sense_buf[0] = 0x70;
4589 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4590 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4591 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4593 /* Illegal request */
4594 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4595 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4596 sense_buf[7] = 10; /* additional length */
4598 /* IOARCB was in error */
4599 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4600 sense_buf[15] = 0xC0;
4601 else /* Parameter data was invalid */
4602 sense_buf[15] = 0x80;
4604 sense_buf[16] =
4605 ((IPR_FIELD_POINTER_MASK &
4606 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4607 sense_buf[17] =
4608 (IPR_FIELD_POINTER_MASK &
4609 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4610 } else {
4611 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4612 if (ipr_is_vset_device(res))
4613 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4614 else
4615 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4617 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4618 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4619 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4620 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4621 sense_buf[6] = failing_lba & 0x000000ff;
4622 }
4624 sense_buf[7] = 6; /* additional length */
4625 }
4626 }
4627 }
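/*
 * Illustrative sketch, not driver code: both branches above store the
 * failing LBA into the sense buffer one byte at a time, most significant
 * byte first. The same big-endian packing as a helper (hypothetical
 * name); e.g. ex_put_be32(&sense_buf[12], failing_lba_hi) followed by
 * ex_put_be32(&sense_buf[16], failing_lba_lo) fills the 8-byte
 * information field of descriptor-format (0x72) sense data.
 */
static void ex_put_be32(u8 *dst, u32 val)
{
	dst[0] = (val & 0xff000000) >> 24;
	dst[1] = (val & 0x00ff0000) >> 16;
	dst[2] = (val & 0x0000ff00) >> 8;
	dst[3] = val & 0x000000ff;
}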
4630 * ipr_get_autosense - Copy autosense data to sense buffer
4631 * @ipr_cmd: ipr command struct
4633 * This function copies the autosense buffer to the buffer
4634 * in the scsi_cmd, if there is autosense available.
4637 * 1 if autosense was available / 0 if not
4639 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4641 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4643 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4644 return 0;
4646 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4647 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4648 SCSI_SENSE_BUFFERSIZE));
4649 return 1;
4650 }
4653 * ipr_erp_start - Process an error response for a SCSI op
4654 * @ioa_cfg: ioa config struct
4655 * @ipr_cmd: ipr command struct
4657 * This function determines whether or not to initiate ERP
4658 * on the affected device.
4663 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4664 struct ipr_cmnd *ipr_cmd)
4666 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4667 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4668 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4670 if (!res) {
4671 ipr_scsi_eh_done(ipr_cmd);
4672 return;
4673 }
4675 if (!ipr_is_gscsi(res))
4676 ipr_gen_sense(ipr_cmd);
4678 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4680 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4681 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4682 if (ipr_is_naca_model(res))
4683 scsi_cmd->result |= (DID_ABORT << 16);
4684 else
4685 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4686 break;
4687 case IPR_IOASC_IR_RESOURCE_HANDLE:
4688 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4689 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4690 break;
4691 case IPR_IOASC_HW_SEL_TIMEOUT:
4692 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4693 if (!ipr_is_naca_model(res))
4694 res->needs_sync_complete = 1;
4695 break;
4696 case IPR_IOASC_SYNC_REQUIRED:
4697 if (!res->in_erp)
4698 res->needs_sync_complete = 1;
4699 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4700 break;
4701 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4702 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4703 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4704 break;
4705 case IPR_IOASC_BUS_WAS_RESET:
4706 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4707 /*
4708 * Report the bus reset and ask for a retry. The device
4709 * will give CC/UA the next command.
4710 */
4711 if (!res->resetting_device)
4712 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4713 scsi_cmd->result |= (DID_ERROR << 16);
4714 if (!ipr_is_naca_model(res))
4715 res->needs_sync_complete = 1;
4716 break;
4717 case IPR_IOASC_HW_DEV_BUS_STATUS:
4718 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4719 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4720 if (!ipr_get_autosense(ipr_cmd)) {
4721 if (!ipr_is_naca_model(res)) {
4722 ipr_erp_cancel_all(ipr_cmd);
4723 return;
4724 }
4725 }
4726 }
4727 if (!ipr_is_naca_model(res))
4728 res->needs_sync_complete = 1;
4729 break;
4730 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4731 break;
4732 default:
4733 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4734 scsi_cmd->result |= (DID_ERROR << 16);
4735 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4736 res->needs_sync_complete = 1;
4737 break;
4738 }
4740 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4741 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4742 scsi_cmd->scsi_done(scsi_cmd);
4746 * ipr_scsi_done - mid-layer done function
4747 * @ipr_cmd: ipr command struct
4749 * This function is invoked by the interrupt handler for
4750 * ops generated by the SCSI mid-layer
4755 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4757 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4758 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4759 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4761 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4763 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4764 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4765 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4766 scsi_cmd->scsi_done(scsi_cmd);
4767 } else
4768 ipr_erp_start(ioa_cfg, ipr_cmd);
4769 }
4772 * ipr_queuecommand - Queue a mid-layer request
4773 * @scsi_cmd: scsi command struct
4774 * @done: done function
4776 * This function queues a request generated by the mid-layer.
4780 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4781 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4783 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4784 void (*done) (struct scsi_cmnd *))
4786 struct ipr_ioa_cfg *ioa_cfg;
4787 struct ipr_resource_entry *res;
4788 struct ipr_ioarcb *ioarcb;
4789 struct ipr_cmnd *ipr_cmd;
4790 int rc = 0;
4792 scsi_cmd->scsi_done = done;
4793 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4794 res = scsi_cmd->device->hostdata;
4795 scsi_cmd->result = (DID_OK << 16);
4798 * We are currently blocking all devices due to a host reset.
4799 * We have told the host to stop giving us new requests, but
4800 * ERP ops don't count. FIXME
4802 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4803 return SCSI_MLQUEUE_HOST_BUSY;
4806 * FIXME - Create scsi_set_host_offline interface
4807 * and the ioa_is_dead check can be removed
4809 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4810 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4811 scsi_cmd->result = (DID_NO_CONNECT << 16);
4812 scsi_cmd->scsi_done(scsi_cmd);
4813 return 0;
4814 }
4816 if (ipr_is_gata(res) && res->sata_port)
4817 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4819 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4820 ioarcb = &ipr_cmd->ioarcb;
4821 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4823 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4824 ipr_cmd->scsi_cmd = scsi_cmd;
4825 ioarcb->res_handle = res->cfgte.res_handle;
4826 ipr_cmd->done = ipr_scsi_done;
4827 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4829 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4830 if (scsi_cmd->underflow == 0)
4831 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4833 if (res->needs_sync_complete) {
4834 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4835 res->needs_sync_complete = 0;
4836 }
4838 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4839 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4840 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4841 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4842 }
4844 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4845 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4846 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4848 if (likely(rc == 0))
4849 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4851 if (likely(rc == 0)) {
4853 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4854 ioa_cfg->regs.ioarrin_reg);
4855 } else {
4856 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4857 return SCSI_MLQUEUE_HOST_BUSY;
4858 }
4860 return 0;
4861 }
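/*
 * Illustrative sketch, not driver code: a prepared op is started by
 * writing the bus address of its IOARCB to the adapter's IOARRIN
 * register; the adapter then DMAs the command block in and eventually
 * posts a completion to the host RRQ serviced by ipr_isr(). Minimal
 * shape of that hand-off (hypothetical name):
 */
static void ex_post_command(void __iomem *ioarrin_reg, __be32 ioarcb_bus_addr)
{
	/* the register takes the CPU-endian 32-bit bus address */
	writel(be32_to_cpu(ioarcb_bus_addr), ioarrin_reg);
}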
4864 * ipr_ioctl - IOCTL handler
4865 * @sdev: scsi device struct
4870 * 0 on success / other on failure
4872 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4874 struct ipr_resource_entry *res;
4876 res = (struct ipr_resource_entry *)sdev->hostdata;
4877 if (res && ipr_is_gata(res))
4878 return ata_scsi_ioctl(sdev, cmd, arg);
4880 return -EINVAL;
4881 }
4884 * ipr_ioa_info - Get information about the card/driver
4885 * @host: scsi host struct
4888 * pointer to buffer with description string
4890 static const char * ipr_ioa_info(struct Scsi_Host *host)
4892 static char buffer[512];
4893 struct ipr_ioa_cfg *ioa_cfg;
4894 unsigned long lock_flags = 0;
4896 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4898 spin_lock_irqsave(host->host_lock, lock_flags);
4899 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4900 spin_unlock_irqrestore(host->host_lock, lock_flags);
4902 return buffer;
4903 }
4905 static struct scsi_host_template driver_template = {
4906 .module = THIS_MODULE,
4908 .info = ipr_ioa_info,
4909 .ioctl = ipr_ioctl,
4910 .queuecommand = ipr_queuecommand,
4911 .eh_abort_handler = ipr_eh_abort,
4912 .eh_device_reset_handler = ipr_eh_dev_reset,
4913 .eh_host_reset_handler = ipr_eh_host_reset,
4914 .slave_alloc = ipr_slave_alloc,
4915 .slave_configure = ipr_slave_configure,
4916 .slave_destroy = ipr_slave_destroy,
4917 .target_alloc = ipr_target_alloc,
4918 .target_destroy = ipr_target_destroy,
4919 .change_queue_depth = ipr_change_queue_depth,
4920 .change_queue_type = ipr_change_queue_type,
4921 .bios_param = ipr_biosparam,
4922 .can_queue = IPR_MAX_COMMANDS,
4924 .sg_tablesize = IPR_MAX_SGLIST,
4925 .max_sectors = IPR_IOA_MAX_SECTORS,
4926 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4927 .use_clustering = ENABLE_CLUSTERING,
4928 .shost_attrs = ipr_ioa_attrs,
4929 .sdev_attrs = ipr_dev_attrs,
4930 .proc_name = IPR_NAME
4934 * ipr_ata_phy_reset - libata phy_reset handler
4935 * @ap: ata port to reset
4938 static void ipr_ata_phy_reset(struct ata_port *ap)
4940 unsigned long flags;
4941 struct ipr_sata_port *sata_port = ap->private_data;
4942 struct ipr_resource_entry *res = sata_port->res;
4943 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4944 int rc = -ENXIO;
4947 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4948 while(ioa_cfg->in_reset_reload) {
4949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4950 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4951 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4954 if (!ioa_cfg->allow_cmds)
4955 goto out_unlock;
4957 rc = ipr_device_reset(ioa_cfg, res);
4959 if (rc) {
4960 ap->ops->port_disable(ap);
4961 goto out_unlock;
4962 }
4964 switch(res->cfgte.proto) {
4965 case IPR_PROTO_SATA:
4966 case IPR_PROTO_SAS_STP:
4967 ap->device[0].class = ATA_DEV_ATA;
4968 break;
4969 case IPR_PROTO_SATA_ATAPI:
4970 case IPR_PROTO_SAS_STP_ATAPI:
4971 ap->device[0].class = ATA_DEV_ATAPI;
4972 break;
4973 default:
4974 ap->device[0].class = ATA_DEV_UNKNOWN;
4975 ap->ops->port_disable(ap);
4976 break;
4977 }
4979 out_unlock:
4980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4981 }
4985 * ipr_ata_post_internal - Cleanup after an internal command
4986 * @qc: ATA queued command
4991 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4993 struct ipr_sata_port *sata_port = qc->ap->private_data;
4994 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4995 struct ipr_cmnd *ipr_cmd;
4996 unsigned long flags;
4998 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4999 while(ioa_cfg->in_reset_reload) {
5000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5001 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5002 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5005 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5006 if (ipr_cmd->qc == qc) {
5007 ipr_device_reset(ioa_cfg, sata_port->res);
5008 break;
5009 }
5010 }
5011 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5015 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5017 * @tf: destination ATA taskfile
5022 static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5024 struct ipr_sata_port *sata_port = ap->private_data;
5025 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5027 tf->feature = g->error;
5028 tf->nsect = g->nsect;
5029 tf->lbal = g->lbal;
5030 tf->lbam = g->lbam;
5031 tf->lbah = g->lbah;
5032 tf->device = g->device;
5033 tf->command = g->status;
5034 tf->hob_nsect = g->hob_nsect;
5035 tf->hob_lbal = g->hob_lbal;
5036 tf->hob_lbam = g->hob_lbam;
5037 tf->hob_lbah = g->hob_lbah;
5038 tf->ctl = g->alt_status;
5042 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5043 * @regs: destination
5044 * @tf: source ATA taskfile
5049 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5050 struct ata_taskfile *tf)
5052 regs->feature = tf->feature;
5053 regs->nsect = tf->nsect;
5054 regs->lbal = tf->lbal;
5055 regs->lbam = tf->lbam;
5056 regs->lbah = tf->lbah;
5057 regs->device = tf->device;
5058 regs->command = tf->command;
5059 regs->hob_feature = tf->hob_feature;
5060 regs->hob_nsect = tf->hob_nsect;
5061 regs->hob_lbal = tf->hob_lbal;
5062 regs->hob_lbam = tf->hob_lbam;
5063 regs->hob_lbah = tf->hob_lbah;
5064 regs->ctl = tf->ctl;
5068 * ipr_sata_done - done function for SATA commands
5069 * @ipr_cmd: ipr command struct
5071 * This function is invoked by the interrupt handler for
5072 * ops generated by the SCSI mid-layer to SATA devices
5077 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5079 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5080 struct ata_queued_cmd *qc = ipr_cmd->qc;
5081 struct ipr_sata_port *sata_port = qc->ap->private_data;
5082 struct ipr_resource_entry *res = sata_port->res;
5083 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5085 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5086 sizeof(struct ipr_ioasa_gata));
5087 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5089 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5090 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5091 res->cfgte.res_addr.target);
5093 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5094 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5095 else
5096 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5097 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5098 ata_qc_complete(qc);
5102 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5103 * @ipr_cmd: ipr command struct
5104 * @qc: ATA queued command
5107 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5108 struct ata_queued_cmd *qc)
5110 u32 ioadl_flags = 0;
5111 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5112 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5113 int len = qc->nbytes + qc->pad_len;
5114 struct scatterlist *sg;
5116 if (len == 0)
5117 return;
5119 if (qc->dma_dir == DMA_TO_DEVICE) {
5120 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5121 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5122 ioarcb->write_data_transfer_length = cpu_to_be32(len);
5123 ioarcb->write_ioadl_len =
5124 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5125 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5126 ioadl_flags = IPR_IOADL_FLAGS_READ;
5127 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5128 ioarcb->read_ioadl_len =
5129 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5130 }
5132 ata_for_each_sg(sg, qc) {
5133 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5134 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5135 if (ata_sg_is_last(sg, qc))
5136 ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5137 else
5138 ioadl++;
5139 }
5140 }
5143 * ipr_qc_issue - Issue a SATA qc to a device
5144 * @qc: queued command
5149 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5151 struct ata_port *ap = qc->ap;
5152 struct ipr_sata_port *sata_port = ap->private_data;
5153 struct ipr_resource_entry *res = sata_port->res;
5154 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5155 struct ipr_cmnd *ipr_cmd;
5156 struct ipr_ioarcb *ioarcb;
5157 struct ipr_ioarcb_ata_regs *regs;
5159 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5160 return AC_ERR_SYSTEM;
5162 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5163 ioarcb = &ipr_cmd->ioarcb;
5164 regs = &ioarcb->add_data.u.regs;
5166 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5167 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5169 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5171 ipr_cmd->done = ipr_sata_done;
5172 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5173 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5174 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5175 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5176 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5178 ipr_build_ata_ioadl(ipr_cmd, qc);
5179 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5180 ipr_copy_sata_tf(regs, &qc->tf);
5181 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5182 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5184 switch (qc->tf.protocol) {
5185 case ATA_PROT_NODATA:
5186 case ATA_PROT_PIO:
5187 break;
5189 case ATA_PROT_DMA:
5190 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5191 break;
5193 case ATA_PROT_ATAPI:
5194 case ATA_PROT_ATAPI_NODATA:
5195 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5196 break;
5198 case ATA_PROT_ATAPI_DMA:
5199 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5200 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5201 break;
5203 default:
5204 WARN_ON(1);
5205 return AC_ERR_INVALID;
5206 }
5209 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5210 ioa_cfg->regs.ioarrin_reg);
5212 return 0;
5213 }
5215 * ipr_ata_check_status - Return last ATA status
5221 static u8 ipr_ata_check_status(struct ata_port *ap)
5223 struct ipr_sata_port *sata_port = ap->private_data;
5224 return sata_port->ioasa.status;
5228 * ipr_ata_check_altstatus - Return last ATA altstatus
5234 static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5236 struct ipr_sata_port *sata_port = ap->private_data;
5237 return sata_port->ioasa.alt_status;
5240 static struct ata_port_operations ipr_sata_ops = {
5241 .port_disable = ata_port_disable,
5242 .check_status = ipr_ata_check_status,
5243 .check_altstatus = ipr_ata_check_altstatus,
5244 .dev_select = ata_noop_dev_select,
5245 .phy_reset = ipr_ata_phy_reset,
5246 .post_internal_cmd = ipr_ata_post_internal,
5247 .tf_read = ipr_tf_read,
5248 .qc_prep = ata_noop_qc_prep,
5249 .qc_issue = ipr_qc_issue,
5250 .port_start = ata_sas_port_start,
5251 .port_stop = ata_sas_port_stop
5254 static struct ata_port_info sata_port_info = {
5255 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5256 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5257 .pio_mask = 0x10, /* pio4 */
5258 .mwdma_mask = 0x07, /* mwdma2 */
5259 .udma_mask = 0x7f, /* udma0-6 */
5260 .port_ops = &ipr_sata_ops
5263 #ifdef CONFIG_PPC_PSERIES
5264 static const u16 ipr_blocked_processors[] = {
5276 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5277 * @ioa_cfg: ioa cfg struct
5279 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5280 * certain pSeries hardware. This function determines if the given
5281 * adapter is in one of these configurations or not.
5284 * 1 if adapter is not supported / 0 if adapter is supported
5286 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5291 if (ioa_cfg->type == 0x5702) {
5292 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5293 &rev_id) == PCIBIOS_SUCCESSFUL) {
5294 if (rev_id < 4) {
5295 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5296 if (__is_processor(ipr_blocked_processors[i]))
5297 return 1;
5298 }
5299 }
5300 }
5301 }
5302 return 0;
5303 }
5305 #define ipr_invalid_adapter(ioa_cfg) 0
5309 * ipr_ioa_bringdown_done - IOA bring down completion.
5310 * @ipr_cmd: ipr command struct
5312 * This function processes the completion of an adapter bring down.
5313 * It wakes any reset sleepers.
5318 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5320 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5323 ioa_cfg->in_reset_reload = 0;
5324 ioa_cfg->reset_retries = 0;
5325 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5326 wake_up_all(&ioa_cfg->reset_wait_q);
5328 spin_unlock_irq(ioa_cfg->host->host_lock);
5329 scsi_unblock_requests(ioa_cfg->host);
5330 spin_lock_irq(ioa_cfg->host->host_lock);
5333 return IPR_RC_JOB_RETURN;
5337 * ipr_ioa_reset_done - IOA reset completion.
5338 * @ipr_cmd: ipr command struct
5340 * This function processes the completion of an adapter reset.
5341 * It schedules any necessary mid-layer add/removes and
5342 * wakes any reset sleepers.
5347 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5349 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5350 struct ipr_resource_entry *res;
5351 struct ipr_hostrcb *hostrcb, *temp;
5355 ioa_cfg->in_reset_reload = 0;
5356 ioa_cfg->allow_cmds = 1;
5357 ioa_cfg->reset_cmd = NULL;
5358 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5360 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5361 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5363 break;
5364 }
5365 }
5366 schedule_work(&ioa_cfg->work_q);
5368 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5369 list_del(&hostrcb->queue);
5370 if (i++ < IPR_NUM_LOG_HCAMS)
5371 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5372 else
5373 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5374 }
5376 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5378 ioa_cfg->reset_retries = 0;
5379 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5380 wake_up_all(&ioa_cfg->reset_wait_q);
5382 spin_unlock_irq(ioa_cfg->host->host_lock);
5383 scsi_unblock_requests(ioa_cfg->host);
5384 spin_lock_irq(ioa_cfg->host->host_lock);
5386 if (!ioa_cfg->allow_cmds)
5387 scsi_block_requests(ioa_cfg->host);
5390 return IPR_RC_JOB_RETURN;
5391 }
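/*
 * Illustrative sketch, not driver code: adapter reset/bring-up is driven
 * as a chain of job steps. Each stage either finishes synchronously and
 * asks for the next step to run at once (JOB_CONTINUE), or issues an
 * async command / starts a timer and yields (JOB_RETURN), resuming when
 * the completion re-enters the job. A minimal model of that driving
 * loop, with hypothetical ex_* names:
 */
enum { EX_JOB_CONTINUE, EX_JOB_RETURN };

struct ex_job {
	int (*job_step)(struct ex_job *);	/* advanced by each stage */
};

static void ex_run_job(struct ex_job *job)
{
	int rc;

	do {
		/* each stage sets job->job_step to the next stage */
		rc = job->job_step(job);
	} while (rc == EX_JOB_CONTINUE);
	/* EX_JOB_RETURN: a completion handler calls ex_run_job() again */
}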
5394 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5395 * @supported_dev: supported device struct
5396 * @vpids: vendor product id struct
5401 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5402 struct ipr_std_inq_vpids *vpids)
5404 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5405 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5406 supported_dev->num_records = 1;
5407 supported_dev->data_length =
5408 cpu_to_be16(sizeof(struct ipr_supported_device));
5409 supported_dev->reserved = 0;
5413 * ipr_set_supported_devs - Send Set Supported Devices for a device
5414 * @ipr_cmd: ipr command struct
5416 * This function sends a Set Supported Devices command to the adapter.
5419 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5421 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5424 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5425 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5426 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5427 struct ipr_resource_entry *res = ipr_cmd->u.res;
5429 ipr_cmd->job_step = ipr_ioa_reset_done;
5431 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5432 if (!ipr_is_scsi_disk(res))
5433 continue;
5435 ipr_cmd->u.res = res;
5436 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5438 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5439 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5440 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5442 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5443 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5444 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5446 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5447 sizeof(struct ipr_supported_device));
5448 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5449 offsetof(struct ipr_misc_cbs, supp_dev));
5450 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5451 ioarcb->write_data_transfer_length =
5452 cpu_to_be32(sizeof(struct ipr_supported_device));
5454 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5455 IPR_SET_SUP_DEVICE_TIMEOUT);
5457 ipr_cmd->job_step = ipr_set_supported_devs;
5458 return IPR_RC_JOB_RETURN;
5459 }
5461 return IPR_RC_JOB_CONTINUE;
5465 * ipr_setup_write_cache - Disable write cache if needed
5466 * @ipr_cmd: ipr command struct
5468 * This function sets up the adapter's write cache to the desired setting.
5471 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5473 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5475 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5477 ipr_cmd->job_step = ipr_set_supported_devs;
5478 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5479 struct ipr_resource_entry, queue);
5481 if (ioa_cfg->cache_state != CACHE_DISABLED)
5482 return IPR_RC_JOB_CONTINUE;
5484 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5485 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5486 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5487 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5489 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5491 return IPR_RC_JOB_RETURN;
5495 * ipr_get_mode_page - Locate specified mode page
5496 * @mode_pages: mode page buffer
5497 * @page_code: page code to find
5498 * @len: minimum required length for mode page
5501 * pointer to mode page / NULL on failure
5503 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5504 u32 page_code, u32 len)
5506 struct ipr_mode_page_hdr *mode_hdr;
5510 if (!mode_pages || (mode_pages->hdr.length == 0))
5511 return NULL;
5513 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5514 mode_hdr = (struct ipr_mode_page_hdr *)
5515 (mode_pages->data + mode_pages->hdr.block_desc_len);
5517 while (length) {
5518 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5519 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5520 return mode_hdr;
5521 break;
5522 } else {
5523 page_length = (sizeof(struct ipr_mode_page_hdr) +
5524 mode_hdr->page_length);
5525 length -= page_length;
5526 mode_hdr = (struct ipr_mode_page_hdr *)
5527 ((unsigned long)mode_hdr + page_length);
5528 }
5529 }
5531 return NULL;
5532 }
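/*
 * Illustrative sketch, not driver code: mode pages are variable-length
 * records -- a small header carrying the page code and the number of
 * bytes that follow -- so locating one is a walk that either matches the
 * code or skips header plus payload. The same walk over a plain byte
 * buffer (hypothetical ex_* names):
 */
struct ex_mode_page_hdr {
	u8 page_code;
	u8 page_length;		/* bytes following this header */
};

static struct ex_mode_page_hdr *ex_find_mode_page(u8 *buf, int len, u8 code)
{
	struct ex_mode_page_hdr *hdr = (struct ex_mode_page_hdr *)buf;

	while (len >= (int)sizeof(*hdr)) {
		if (hdr->page_code == code)
			return hdr;

		/* skip this page: header plus its payload */
		len -= sizeof(*hdr) + hdr->page_length;
		hdr = (struct ex_mode_page_hdr *)
			((u8 *)hdr + sizeof(*hdr) + hdr->page_length);
	}
	return NULL;
}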
5534 * ipr_check_term_power - Check for term power errors
5535 * @ioa_cfg: ioa config struct
5536 * @mode_pages: IOAFP mode pages buffer
5538 * Check the IOAFP's mode page 28 for term power errors
5543 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5544 struct ipr_mode_pages *mode_pages)
5546 int i;
5547 int entry_length;
5548 struct ipr_dev_bus_entry *bus;
5549 struct ipr_mode_page28 *mode_page;
5551 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5552 sizeof(struct ipr_mode_page28));
5554 entry_length = mode_page->entry_length;
5556 bus = mode_page->bus;
5558 for (i = 0; i < mode_page->num_entries; i++) {
5559 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5560 dev_err(&ioa_cfg->pdev->dev,
5561 "Term power is absent on scsi bus %d\n",
5565 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5570 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5571 * @ioa_cfg: ioa config struct
5573 * Looks through the config table checking for SES devices. If
5574 * the SES device is in the SES table indicating a maximum SCSI
5575 * bus speed, the speed is limited for the bus.
5580 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5582 u32 max_xfer_rate;
5583 int i;
5585 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5586 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5587 ioa_cfg->bus_attr[i].bus_width);
5589 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5590 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5595 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5596 * @ioa_cfg: ioa config struct
5597 * @mode_pages: mode page 28 buffer
5599 * Updates mode page 28 based on driver configuration
5604 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5605 struct ipr_mode_pages *mode_pages)
5607 int i, entry_length;
5608 struct ipr_dev_bus_entry *bus;
5609 struct ipr_bus_attributes *bus_attr;
5610 struct ipr_mode_page28 *mode_page;
5612 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5613 sizeof(struct ipr_mode_page28));
5615 entry_length = mode_page->entry_length;
5617 /* Loop for each device bus entry */
5618 for (i = 0, bus = mode_page->bus;
5619 i < mode_page->num_entries;
5620 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5621 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5622 dev_err(&ioa_cfg->pdev->dev,
5623 "Invalid resource address reported: 0x%08X\n",
5624 IPR_GET_PHYS_LOC(bus->res_addr));
5625 continue;
5626 }
5628 bus_attr = &ioa_cfg->bus_attr[i];
5629 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5630 bus->bus_width = bus_attr->bus_width;
5631 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5632 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5633 if (bus_attr->qas_enabled)
5634 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5635 else
5636 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5637 }
5638 }
5641 * ipr_build_mode_select - Build a mode select command
5642 * @ipr_cmd: ipr command struct
5643 * @res_handle: resource handle to send command to
5644 * @parm: Byte 2 of Mode Sense command
5645 * @dma_addr: DMA buffer address
5646 * @xfer_len: data transfer length
5651 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5652 __be32 res_handle, u8 parm, u32 dma_addr,
5653 u8 xfer_len)
5655 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5656 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5658 ioarcb->res_handle = res_handle;
5659 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5660 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5661 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5662 ioarcb->cmd_pkt.cdb[1] = parm;
5663 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5665 ioadl->flags_and_data_len =
5666 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5667 ioadl->address = cpu_to_be32(dma_addr);
5668 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5669 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5673 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5674 * @ipr_cmd: ipr command struct
5676 * This function sets up the SCSI bus attributes and sends
5677 * a Mode Select for Page 28 to activate them.
5682 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5685 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5686 int length;
5689 ipr_scsi_bus_speed_limit(ioa_cfg);
5690 ipr_check_term_power(ioa_cfg, mode_pages);
5691 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5692 length = mode_pages->hdr.length + 1;
5693 mode_pages->hdr.length = 0;
5695 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5696 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5697 length);
5699 ipr_cmd->job_step = ipr_setup_write_cache;
5700 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5703 return IPR_RC_JOB_RETURN;
5707 * ipr_build_mode_sense - Builds a mode sense command
5708 * @ipr_cmd: ipr command struct
5709 * @res_handle: resource handle to send command to
5710 * @parm: Byte 2 of mode sense command
5711 * @dma_addr: DMA address of mode sense buffer
5712 * @xfer_len: Size of DMA buffer
5717 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5718 __be32 res_handle,
5719 u8 parm, u32 dma_addr, u8 xfer_len)
5721 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5722 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5724 ioarcb->res_handle = res_handle;
5725 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5726 ioarcb->cmd_pkt.cdb[2] = parm;
5727 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5728 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5730 ioadl->flags_and_data_len =
5731 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5732 ioadl->address = cpu_to_be32(dma_addr);
5733 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5734 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5738 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5739 * @ipr_cmd: ipr command struct
5741 * This function handles the failure of an IOA bringup command.
5746 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5749 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5751 dev_err(&ioa_cfg->pdev->dev,
5752 "0x%02X failed with IOASC: 0x%08X\n",
5753 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5755 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5756 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5757 return IPR_RC_JOB_RETURN;
5761 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5762 * @ipr_cmd: ipr command struct
5764 * This function handles the failure of a Mode Sense to the IOAFP.
5765 * Some adapters do not handle all mode pages.
5768 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5770 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5772 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5774 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5775 ipr_cmd->job_step = ipr_setup_write_cache;
5776 return IPR_RC_JOB_CONTINUE;
5777 }
5779 return ipr_reset_cmd_failed(ipr_cmd);
5783 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5784 * @ipr_cmd: ipr command struct
5786 * This function sends a Page 28 mode sense to the IOA to
5787 * retrieve SCSI bus attributes.
5792 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5794 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5797 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5798 0x28, ioa_cfg->vpd_cbs_dma +
5799 offsetof(struct ipr_misc_cbs, mode_pages),
5800 sizeof(struct ipr_mode_pages));
5802 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5803 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5805 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5808 return IPR_RC_JOB_RETURN;
5812 * ipr_init_res_table - Initialize the resource table
5813 * @ipr_cmd: ipr command struct
5815 * This function looks through the existing resource table, comparing
5816 * it with the config table. This function will take care of old/new
5817 * devices and schedule adding/removing them from the mid-layer.
5821 * IPR_RC_JOB_CONTINUE
5823 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5825 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5826 struct ipr_resource_entry *res, *temp;
5827 struct ipr_config_table_entry *cfgte;
5828 int found, i;
5829 LIST_HEAD(old_res);
5832 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5833 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5835 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5836 list_move_tail(&res->queue, &old_res);
5838 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5839 cfgte = &ioa_cfg->cfg_table->dev[i];
5840 found = 0;
5842 list_for_each_entry_safe(res, temp, &old_res, queue) {
5843 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5845 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5846 found = 1;
5847 break;
5848 }
5849 }
5851 if (!found) {
5852 if (list_empty(&ioa_cfg->free_res_q)) {
5853 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5854 break;
5855 }
5857 found = 1;
5858 res = list_entry(ioa_cfg->free_res_q.next, struct ipr_resource_entry, queue);
5860 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5861 ipr_init_res_entry(res);
5862 res->add_to_ml = 1;
5863 }
5865 if (found)
5866 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5867 }
5869 list_for_each_entry_safe(res, temp, &old_res, queue) {
5870 if (res->sdev) {
5871 res->del_from_ml = 1;
5872 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5873 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5874 } else {
5875 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5876 }
5877 }
5879 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5882 return IPR_RC_JOB_CONTINUE;
5883 }
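/*
 * Illustrative sketch, not driver code: the reconciliation above is an
 * old-list/new-list diff -- park every known resource on a temporary
 * list, move each one that reappears in the fresh config table back to
 * the used list, allocate entries for genuinely new devices from the
 * free list, and whatever is still parked afterwards has gone away. A
 * simplified skeleton of that flow using the same list primitives
 * (hypothetical ex_* names; still_present() stands in for the res_addr
 * comparison against the config table):
 */
static void ex_reconcile(struct list_head *used, struct list_head *free,
			 int (*still_present)(struct list_head *))
{
	struct list_head *pos, *tmp;
	LIST_HEAD(old);

	list_for_each_safe(pos, tmp, used)	/* park everything */
		list_move_tail(pos, &old);

	list_for_each_safe(pos, tmp, &old) {
		if (still_present(pos))
			list_move_tail(pos, used);	/* device kept */
		else
			list_move_tail(pos, free);	/* device gone */
	}
}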
5886 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5887 * @ipr_cmd: ipr command struct
5889 * This function sends a Query IOA Configuration command
5890 * to the adapter to retrieve the IOA configuration table.
5895 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5898 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5899 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5900 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5903 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5904 ucode_vpd->major_release, ucode_vpd->card_type,
5905 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5906 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5907 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5909 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5910 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5911 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5913 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5914 ioarcb->read_data_transfer_length =
5915 cpu_to_be32(sizeof(struct ipr_config_table));
5917 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5918 ioadl->flags_and_data_len =
5919 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5921 ipr_cmd->job_step = ipr_init_res_table;
5923 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5926 return IPR_RC_JOB_RETURN;
5930 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5931 * @ipr_cmd: ipr command struct
5933 * This utility function sends an inquiry to the adapter.
5938 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5939 u32 dma_addr, u8 xfer_len)
5941 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5942 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5945 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5946 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5948 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5949 ioarcb->cmd_pkt.cdb[1] = flags;
5950 ioarcb->cmd_pkt.cdb[2] = page;
5951 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5953 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5954 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5956 ioadl->address = cpu_to_be32(dma_addr);
5957 ioadl->flags_and_data_len =
5958 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5960 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5962 }
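/*
 * Illustrative sketch, not driver code: the helper above only varies
 * three bytes of a standard 6-byte INQUIRY CDB -- byte 1 (the EVPD
 * flag), byte 2 (page code) and byte 4 (allocation length). Building
 * the same CDB in isolation (hypothetical name):
 */
static void ex_build_inquiry_cdb(u8 cdb[6], u8 flags, u8 page, u8 xfer_len)
{
	memset(cdb, 0, 6);
	cdb[0] = INQUIRY;	/* 0x12, from <scsi/scsi.h> */
	cdb[1] = flags;		/* bit 0 = EVPD: page selects a VPD page */
	cdb[2] = page;
	cdb[4] = xfer_len;	/* allocation length */
}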
5965 * ipr_inquiry_page_supported - Is the given inquiry page supported
5966 * @page0: inquiry page 0 buffer
5969 * This function determines if the specified inquiry page is supported.
5972 * 1 if page is supported / 0 if not
5974 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5978 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5979 if (page0->page[i] == page)
5980 return 1;
5982 return 0;
5983 }
5986 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5987 * @ipr_cmd: ipr command struct
5989 * This function sends a Page 3 inquiry to the adapter
5990 * to retrieve software VPD information.
5993 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5995 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5997 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5998 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6002 if (!ipr_inquiry_page_supported(page0, 1))
6003 ioa_cfg->cache_state = CACHE_NONE;
6005 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6007 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6008 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6009 sizeof(struct ipr_inquiry_page3));
6012 return IPR_RC_JOB_RETURN;
6016 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6017 * @ipr_cmd: ipr command struct
6019 * This function sends a Page 0 inquiry to the adapter
6020 * to retrieve supported inquiry pages.
6023 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6025 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6027 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6028 char type[5];
6032 /* Grab the type out of the VPD and store it away */
6033 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6034 type[4] = '\0';
6035 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6037 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6039 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6040 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6041 sizeof(struct ipr_inquiry_page0));
6044 return IPR_RC_JOB_RETURN;
6048 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6049 * @ipr_cmd: ipr command struct
6051 * This function sends a standard inquiry to the adapter.
6056 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6058 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6061 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6063 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6064 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6065 sizeof(struct ipr_ioa_vpd));
6068 return IPR_RC_JOB_RETURN;
6072 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6073 * @ipr_cmd: ipr command struct
6075 * This function sends an Identify Host Request Response Queue
6076 * command to establish the HRRQ with the adapter.
6081 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6083 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6084 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6087 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6089 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6090 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6092 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6093 ioarcb->cmd_pkt.cdb[2] =
6094 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6095 ioarcb->cmd_pkt.cdb[3] =
6096 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6097 ioarcb->cmd_pkt.cdb[4] =
6098 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6099 ioarcb->cmd_pkt.cdb[5] =
6100 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
6101 ioarcb->cmd_pkt.cdb[7] =
6102 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6103 ioarcb->cmd_pkt.cdb[8] =
6104 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6106 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6108 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6111 return IPR_RC_JOB_RETURN;
6112 }
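/*
 * Illustrative sketch, not driver code: Identify Host RRQ hands the
 * adapter the queue's 32-bit DMA address and byte length inside the CDB
 * itself, most significant byte first, exactly as the shifts above do.
 * The same encoding as a helper (hypothetical name):
 */
static void ex_encode_hrrq_cdb(u8 *cdb, u32 dma_addr, u16 len)
{
	cdb[2] = (dma_addr >> 24) & 0xff;
	cdb[3] = (dma_addr >> 16) & 0xff;
	cdb[4] = (dma_addr >> 8) & 0xff;
	cdb[5] = dma_addr & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
}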
6115 * ipr_reset_timer_done - Adapter reset timer function
6116 * @ipr_cmd: ipr command struct
6118 * Description: This function is used in adapter reset processing
6119 * for timing events. If the reset_cmd pointer in the IOA
6120 * config struct is not this adapter's we are doing nested
6121 * resets and fail_all_ops will take care of freeing the
6122 * command block.
6127 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6129 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6130 unsigned long lock_flags = 0;
6132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6134 if (ioa_cfg->reset_cmd == ipr_cmd) {
6135 list_del(&ipr_cmd->queue);
6136 ipr_cmd->done(ipr_cmd);
6139 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6143 * ipr_reset_start_timer - Start a timer for adapter reset job
6144 * @ipr_cmd: ipr command struct
6145 * @timeout: timeout value
6147 * Description: This function is used in adapter reset processing
6148 * for timing events. If the reset_cmd pointer in the IOA
6149 * config struct is not this adapter's we are doing nested
6150 * resets and fail_all_ops will take care of freeing the
6151 * command block.
6156 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6157 unsigned long timeout)
6159 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6160 ipr_cmd->done = ipr_reset_ioa_job;
6162 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6163 ipr_cmd->timer.expires = jiffies + timeout;
6164 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6165 add_timer(&ipr_cmd->timer);
6169 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6170 * @ioa_cfg: ioa cfg struct
6175 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6177 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6179 /* Initialize Host RRQ pointers */
6180 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6181 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6182 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6183 ioa_cfg->toggle_bit = 1;
6185 /* Zero out config table */
6186 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6190 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6191 * @ipr_cmd: ipr command struct
6193 * This function reinitializes some control blocks and
6194 * enables destructive diagnostics on the adapter.
6199 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6201 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6202 volatile u32 int_reg;
6205 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6206 ipr_init_ioa_mem(ioa_cfg);
6208 ioa_cfg->allow_interrupts = 1;
6209 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6211 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6212 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6213 ioa_cfg->regs.clr_interrupt_mask_reg);
6214 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6215 return IPR_RC_JOB_CONTINUE;
6216 }
6218 /* Enable destructive diagnostics on IOA */
6219 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6221 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6222 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6224 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6226 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6227 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6228 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6229 ipr_cmd->done = ipr_reset_ioa_job;
6230 add_timer(&ipr_cmd->timer);
6231 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6234 return IPR_RC_JOB_RETURN;
6238 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6239 * @ipr_cmd: ipr command struct
6241 * This function is invoked when an adapter dump has run out
6242 * of processing time.
6245 * IPR_RC_JOB_CONTINUE
6247 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6249 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6251 if (ioa_cfg->sdt_state == GET_DUMP)
6252 ioa_cfg->sdt_state = ABORT_DUMP;
6254 ipr_cmd->job_step = ipr_reset_alert;
6256 return IPR_RC_JOB_CONTINUE;
6260 * ipr_unit_check_no_data - Log a unit check/no data error log
6261 * @ioa_cfg: ioa config struct
6263 * Logs an error indicating the adapter unit checked, but for some
6264 * reason, we were unable to fetch the unit check buffer.
6269 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6271 ioa_cfg->errors_logged++;
6272 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6276 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6277 * @ioa_cfg: ioa config struct
6279 * Fetches the unit check buffer from the adapter by clocking the data
6280 * through the mailbox register.
6285 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6287 unsigned long mailbox;
6288 struct ipr_hostrcb *hostrcb;
6289 struct ipr_uc_sdt sdt;
6290 int rc, length;
6292 mailbox = readl(ioa_cfg->ioa_mailbox);
6294 if (!ipr_sdt_is_fmt2(mailbox)) {
6295 ipr_unit_check_no_data(ioa_cfg);
6296 return;
6297 }
6299 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6300 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6301 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6303 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6304 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6305 ipr_unit_check_no_data(ioa_cfg);
6306 return;
6307 }
6309 /* Find length of the first sdt entry (UC buffer) */
6310 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6311 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6313 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6314 struct ipr_hostrcb, queue);
6315 list_del(&hostrcb->queue);
6316 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6318 rc = ipr_get_ldump_data_section(ioa_cfg,
6319 be32_to_cpu(sdt.entry[0].bar_str_offset),
6320 (__be32 *)&hostrcb->hcam,
6321 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6323 if (!rc)
6324 ipr_handle_log_data(ioa_cfg, hostrcb);
6325 else
6326 ipr_unit_check_no_data(ioa_cfg);
6328 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}
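/*
 * Illustrative sketch, not part of the driver build: the BIST start sequence
 * above. Writing PCI_BIST_START into the standard BIST config register asks
 * the device to run its built-in self test; the device must not be touched
 * until it finishes, which is why the driver blocks user config access and
 * arms a timer instead of polling here. start_bist() is a hypothetical helper.
 */
#if 0
#include <linux/pci.h>

static int start_bist(struct pci_dev *pdev)
{
	int rc;

	/* PCI_BIST_START requests BIST; the device clears it on completion */
	rc = pci_write_config_byte(pdev, PCI_BIST, PCI_BIST_START);
	if (rc != PCIBIOS_SUCCESSFUL)
		return -EIO;

	return 0;	/* caller must wait before touching the device */
}
#endif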
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning it is the risk of losing its
 * persistent error log; if the adapter is reset while it is writing to
 * flash, the affected flash segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
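/*
 * Illustrative sketch, not part of the driver build: the time-budget polling
 * pattern used above. A total budget is decremented by the poll interval on
 * every unsuccessful check; once the condition holds or the budget is spent,
 * the caller proceeds regardless. All names and values here are hypothetical.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_MS 100
#define TOTAL_BUDGET_MS 2000

static int condition_met(void)	/* stand-in for ipr_reset_allowed() */
{
	return 0;
}

int main(void)
{
	int time_left = TOTAL_BUDGET_MS;

	while (!condition_met() && time_left) {
		time_left -= POLL_INTERVAL_MS;
		usleep(POLL_INTERVAL_MS * 1000);
	}

	/* proceed anyway: a reset is required whether or not permission came */
	printf("proceeding with %d ms of budget left\n", time_left);
	return 0;
}
#endif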
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
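/*
 * Illustrative sketch, not part of the driver build: the CDB encoding above.
 * The SCSI WRITE BUFFER command (opcode 0x3B) carries the transfer length as
 * a 24-bit big-endian value in CDB bytes 6-8, which is exactly the
 * shift-and-mask split performed on sglist->buffer_len.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cdb[10] = { 0 };
	uint32_t buffer_len = 0x123456;	/* hypothetical microcode image size */

	cdb[0] = 0x3B;	/* WRITE BUFFER */
	cdb[6] = (buffer_len & 0xff0000) >> 16;	/* most significant byte */
	cdb[7] = (buffer_len & 0x00ff00) >> 8;
	cdb[8] = buffer_len & 0x0000ff;		/* least significant byte */

	printf("cdb[6..8] = %02x %02x %02x\n", cdb[6], cdb[7], cdb[8]);
	return 0;
}
#endif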
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else {
		ipr_cmd->job_step = ipr_reset_alert;
	}

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
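/*
 * Illustrative sketch, not part of the driver build: the job-step state
 * machine that ipr_reset_ioa_job() drives. A step that can finish
 * synchronously returns CONTINUE, so the router immediately invokes the next
 * step; a step that must wait (for an interrupt, a timer, or a completed
 * command) returns RETURN, and the eventual completion re-enters the router.
 * All names below are hypothetical.
 */
#if 0
#include <stdio.h>

enum rc { JOB_CONTINUE, JOB_RETURN };

struct job {
	enum rc (*step)(struct job *);
};

static enum rc step_done(struct job *j)
{
	printf("reset complete\n");
	return JOB_RETURN;	/* nothing further to run synchronously */
}

static enum rc step_alert(struct job *j)
{
	printf("alerting adapter\n");
	j->step = step_done;	/* chain to the next step */
	return JOB_CONTINUE;	/* no asynchronous wait needed here */
}

static void run_job(struct job *j)	/* analogue of ipr_reset_ioa_job() */
{
	enum rc rc;

	do {
		rc = j->step(j);
	} while (rc == JOB_CONTINUE);	/* JOB_RETURN: wait for completion */
}

int main(void)
{
	struct job j = { .step = step_alert };

	run_job(&j);
	return 0;
}
#endif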
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
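/*
 * Illustrative sketch, not part of the driver build: how the PCI error
 * recovery core consumes the callbacks above. On an EEH/AER event the core
 * calls .error_detected(); answering PCI_ERS_RESULT_NEED_RESET requests a
 * slot reset, after which .slot_reset() runs and the driver restores config
 * space and restarts the adapter. The my_* names are hypothetical.
 */
#if 0
#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	/* quiesce I/O; decide whether a reset can recover the device */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	/* reinitialize the device behind the freshly reset slot */
	return PCI_ERS_RESULT_RECOVERED;
}

static struct pci_error_handlers my_err_handler = {
	.error_detected = my_error_detected,
	.slot_reset = my_slot_reset,
};
#endif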
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
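/*
 * Illustrative sketch, not part of the driver build: why the IOARCB setup
 * above can use plain offsetof() arithmetic. pci_pool_alloc() hands back a
 * kernel virtual address and the matching bus (DMA) address of the same
 * block, so the bus address of any member is dma_addr plus that member's
 * offset. The my_cmd structure and describe() helper are hypothetical.
 */
#if 0
#include <linux/pci.h>
#include <stddef.h>

struct my_cmd {
	u32 header;
	u8 payload[256];
};

static void describe(struct pci_pool *pool)
{
	dma_addr_t dma_addr, payload_dma;
	struct my_cmd *cmd;

	cmd = pci_pool_alloc(pool, GFP_KERNEL, &dma_addr);
	if (!cmd)
		return;

	/* bus address the adapter would DMA the payload to/from */
	payload_dma = dma_addr + offsetof(struct my_cmd, payload);

	pci_pool_free(pool, cmd, dma_addr);
	(void)payload_dma;
}
#endif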
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
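/*
 * Illustrative sketch, not part of the driver build: the goto-based unwind
 * used by ipr_alloc_mem() above. Each failing allocation jumps to the label
 * that frees everything allocated before it, so the labels read as the
 * allocation sequence in reverse. All names here are hypothetical.
 */
#if 0
#include <stdlib.h>

int setup(char **pa, char **pb, char **pc)
{
	char *a, *b, *c;

	a = malloc(64);
	if (!a)
		goto out;
	b = malloc(64);
	if (!b)
		goto out_free_a;
	c = malloc(64);
	if (!c)
		goto out_free_b;

	*pa = a;	/* success: ownership passes to the caller */
	*pb = b;
	*pc = c;
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	return -1;
}
#endif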
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
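/*
 * Illustrative sketch, not part of the driver build: the register mapping
 * performed at the end of ipr_init_ioa_cfg(). The per-chip table stores only
 * byte offsets; adding each offset to the ioremap()ed BAR base once at probe
 * time yields the __iomem pointers the hot paths dereference directly,
 * avoiding a lookup per register access. Structure names are hypothetical.
 */
#if 0
#include <linux/io.h>

struct chip_offsets {
	unsigned long ioarrin_reg;	/* byte offset within BAR 0 */
};

struct mapped_regs {
	void __iomem *ioarrin_reg;	/* ready-to-use MMIO pointer */
};

static void map_regs(struct mapped_regs *t, const struct chip_offsets *p,
		     void __iomem *base)
{
	t->ioarrin_reg = base + p->ioarrin_reg;	/* resolve once, use often */
}
#endif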
/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;

	return NULL;
}
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
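/*
 * Illustrative sketch, not part of the driver build: a simplified view of
 * how the PCI core matches devices against the table above; wildcard
 * (PCI_ANY_ID) and class matching are omitted. When an entry matches, its
 * driver_data (e.g. IPR_USE_LONG_TRANSOP_TIMEOUT) reaches ipr_probe()
 * through dev_id->driver_data, and MODULE_DEVICE_TABLE() exports the table
 * so the module can be autoloaded on hotplug. matches() is hypothetical.
 */
#if 0
#include <linux/pci.h>

static int matches(const struct pci_device_id *id, const struct pci_dev *dev)
{
	return id->vendor == dev->vendor &&
	       id->device == dev->device &&
	       id->subvendor == dev->subsystem_vendor &&
	       id->subdevice == dev->subsystem_device;
}
#endif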
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
	.dynids.use_driver_data = 1
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);