1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13
14 static int qla24xx_vport_disable(struct fc_vport *, bool);
15 static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
16 int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
17 static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
18 /* SYSFS attributes --------------------------------------------------------- */
19
20 static ssize_t
21 qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
22                            struct bin_attribute *bin_attr,
23                            char *buf, loff_t off, size_t count)
24 {
25         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
26             struct device, kobj)));
27         struct qla_hw_data *ha = vha->hw;
28
29         if (ha->fw_dump_reading == 0)
30                 return 0;
31
32         return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
33                                         ha->fw_dump_len);
34 }
35
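/*
 * Writing a control code to 'fw_dump' drives the firmware-dump state
 * machine implemented below:
 *   0 - clear the captured-dump state (the dump is no longer readable)
 *   1 - mark an existing dump as readable through this attribute
 *   2 - (re)allocate the firmware dump buffer
 *   3 - force a firmware system error, which produces a fresh dump
 *
 * A retrieval sequence from user space might look like this (illustrative
 * only; the binary attribute lives in the hostN device directory in sysfs):
 *   echo 1 > .../hostN/fw_dump
 *   cat .../hostN/fw_dump > /tmp/fw_dump.bin
 *   echo 0 > .../hostN/fw_dump
 */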
36 static ssize_t
37 qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
38                             struct bin_attribute *bin_attr,
39                             char *buf, loff_t off, size_t count)
40 {
41         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
42             struct device, kobj)));
43         struct qla_hw_data *ha = vha->hw;
44         int reading;
45
46         if (off != 0)
47                 return (0);
48
49         reading = simple_strtol(buf, NULL, 10);
50         switch (reading) {
51         case 0:
52                 if (!ha->fw_dump_reading)
53                         break;
54
55                 qla_printk(KERN_INFO, ha,
56                     "Firmware dump cleared on (%ld).\n", vha->host_no);
57
58                 ha->fw_dump_reading = 0;
59                 ha->fw_dumped = 0;
60                 break;
61         case 1:
62                 if (ha->fw_dumped && !ha->fw_dump_reading) {
63                         ha->fw_dump_reading = 1;
64
65                         qla_printk(KERN_INFO, ha,
66                             "Raw firmware dump ready for read on (%ld).\n",
67                             vha->host_no);
68                 }
69                 break;
70         case 2:
71                 qla2x00_alloc_fw_dump(vha);
72                 break;
73         case 3:
74                 qla2x00_system_error(vha);
75                 break;
76         }
77         return (count);
78 }
79
80 static struct bin_attribute sysfs_fw_dump_attr = {
81         .attr = {
82                 .name = "fw_dump",
83                 .mode = S_IRUSR | S_IWUSR,
84         },
85         .size = 0,
86         .read = qla2x00_sysfs_read_fw_dump,
87         .write = qla2x00_sysfs_write_fw_dump,
88 };
89
90 static ssize_t
91 qla2x00_sysfs_read_nvram(struct kobject *kobj,
92                          struct bin_attribute *bin_attr,
93                          char *buf, loff_t off, size_t count)
94 {
95         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
96             struct device, kobj)));
97         struct qla_hw_data *ha = vha->hw;
98
99         if (!capable(CAP_SYS_ADMIN))
100                 return 0;
101
102         if (IS_NOCACHE_VPD_TYPE(ha))
103                 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
104                     ha->nvram_size);
105         return memory_read_from_buffer(buf, count, &off, ha->nvram,
106                                         ha->nvram_size);
107 }
108
109 static ssize_t
110 qla2x00_sysfs_write_nvram(struct kobject *kobj,
111                           struct bin_attribute *bin_attr,
112                           char *buf, loff_t off, size_t count)
113 {
114         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
115             struct device, kobj)));
116         struct qla_hw_data *ha = vha->hw;
117         uint16_t        cnt;
118
119         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
120             !ha->isp_ops->write_nvram)
121                 return 0;
122
123         /* Checksum NVRAM. */
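        /*
         * The final 32-bit word (FWI2-capable ISPs) or final byte (earlier
         * ISPs) is replaced with the two's complement of the running sum so
         * that the complete NVRAM image sums to zero.
         */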
124         if (IS_FWI2_CAPABLE(ha)) {
125                 uint32_t *iter;
126                 uint32_t chksum;
127
128                 iter = (uint32_t *)buf;
129                 chksum = 0;
130                 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
131                         chksum += le32_to_cpu(*iter++);
132                 chksum = ~chksum + 1;
133                 *iter = cpu_to_le32(chksum);
134         } else {
135                 uint8_t *iter;
136                 uint8_t chksum;
137
138                 iter = (uint8_t *)buf;
139                 chksum = 0;
140                 for (cnt = 0; cnt < count - 1; cnt++)
141                         chksum += *iter++;
142                 chksum = ~chksum + 1;
143                 *iter = chksum;
144         }
145
146         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
147                 qla_printk(KERN_WARNING, ha,
148                     "HBA not online, failing NVRAM update.\n");
149                 return -EAGAIN;
150         }
151
152         /* Write NVRAM. */
153         ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
154         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
155             count);
156
157         /* NVRAM settings take effect immediately. */
158         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
159         qla2xxx_wake_dpc(vha);
160         qla2x00_wait_for_chip_reset(vha);
161
162         return (count);
163 }
164
165 static struct bin_attribute sysfs_nvram_attr = {
166         .attr = {
167                 .name = "nvram",
168                 .mode = S_IRUSR | S_IWUSR,
169         },
170         .size = 512,
171         .read = qla2x00_sysfs_read_nvram,
172         .write = qla2x00_sysfs_write_nvram,
173 };
174
175 static ssize_t
176 qla2x00_sysfs_read_optrom(struct kobject *kobj,
177                           struct bin_attribute *bin_attr,
178                           char *buf, loff_t off, size_t count)
179 {
180         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
181             struct device, kobj)));
182         struct qla_hw_data *ha = vha->hw;
183
184         if (ha->optrom_state != QLA_SREADING)
185                 return 0;
186
187         return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
188                                         ha->optrom_region_size);
189 }
190
191 static ssize_t
192 qla2x00_sysfs_write_optrom(struct kobject *kobj,
193                            struct bin_attribute *bin_attr,
194                            char *buf, loff_t off, size_t count)
195 {
196         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
197             struct device, kobj)));
198         struct qla_hw_data *ha = vha->hw;
199
200         if (ha->optrom_state != QLA_SWRITING)
201                 return -EINVAL;
202         if (off > ha->optrom_region_size)
203                 return -ERANGE;
204         if (off + count > ha->optrom_region_size)
205                 count = ha->optrom_region_size - off;
206
207         memcpy(&ha->optrom_buffer[off], buf, count);
208
209         return count;
210 }
211
212 static struct bin_attribute sysfs_optrom_attr = {
213         .attr = {
214                 .name = "optrom",
215                 .mode = S_IRUSR | S_IWUSR,
216         },
217         .size = 0,
218         .read = qla2x00_sysfs_read_optrom,
219         .write = qla2x00_sysfs_write_optrom,
220 };
221
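/*
 * 'optrom_ctl' drives a small state machine (QLA_SWAITING/SREADING/SWRITING)
 * around the 'optrom' binary attribute.  The string written is
 * "<cmd>[:<start>:<size>]" with start/size in hex:
 *   0 - free the staging buffer and return to the waiting state
 *   1 - read the selected flash region into the staging buffer, to be
 *       retrieved through 'optrom'
 *   2 - allocate a staging buffer which user space then fills via 'optrom'
 *   3 - burn the staged buffer into flash
 *
 * A rough flash-update sequence might look like this (hypothetical region
 * values, shown only as an example):
 *   echo "2:0x80000:0x20000" > optrom_ctl
 *   dd if=fw.bin of=optrom bs=64k
 *   echo "3" > optrom_ctl
 *   echo "0" > optrom_ctl
 */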
222 static ssize_t
223 qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
224                                struct bin_attribute *bin_attr,
225                                char *buf, loff_t off, size_t count)
226 {
227         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
228             struct device, kobj)));
229         struct qla_hw_data *ha = vha->hw;
230
231         uint32_t start = 0;
232         uint32_t size = ha->optrom_size;
233         int val, valid;
234
235         if (off)
236                 return 0;
237
238         if (unlikely(pci_channel_offline(ha->pdev)))
239                 return 0;
240
241         if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
242                 return -EINVAL;
243         if (start > ha->optrom_size)
244                 return -EINVAL;
245
246         switch (val) {
247         case 0:
248                 if (ha->optrom_state != QLA_SREADING &&
249                     ha->optrom_state != QLA_SWRITING)
250                         break;
251
252                 ha->optrom_state = QLA_SWAITING;
253
254                 DEBUG2(qla_printk(KERN_INFO, ha,
255                     "Freeing flash region allocation -- 0x%x bytes.\n",
256                     ha->optrom_region_size));
257
258                 vfree(ha->optrom_buffer);
259                 ha->optrom_buffer = NULL;
260                 break;
261         case 1:
262                 if (ha->optrom_state != QLA_SWAITING)
263                         break;
264
265                 ha->optrom_region_start = start;
266                 ha->optrom_region_size = start + size > ha->optrom_size ?
267                     ha->optrom_size - start : size;
268
269                 ha->optrom_state = QLA_SREADING;
270                 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
271                 if (ha->optrom_buffer == NULL) {
272                         qla_printk(KERN_WARNING, ha,
273                             "Unable to allocate memory for optrom retrieval "
274                             "(%x).\n", ha->optrom_region_size);
275
276                         ha->optrom_state = QLA_SWAITING;
277                         return count;
278                 }
279
280                 DEBUG2(qla_printk(KERN_INFO, ha,
281                     "Reading flash region -- 0x%x/0x%x.\n",
282                     ha->optrom_region_start, ha->optrom_region_size));
283
284                 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
285                 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
286                     ha->optrom_region_start, ha->optrom_region_size);
287                 break;
288         case 2:
289                 if (ha->optrom_state != QLA_SWAITING)
290                         break;
291
292                 /*
293                  * We need to be more restrictive on which FLASH regions are
294                  * allowed to be updated via user-space.  Regions accessible
295                  * via this method include:
296                  *
297                  * ISP21xx/ISP22xx/ISP23xx type boards:
298                  *
299                  *      0x000000 -> 0x020000 -- Boot code.
300                  *
301                  * ISP2322/ISP24xx type boards:
302                  *
303                  *      0x000000 -> 0x07ffff -- Boot code.
304                  *      0x080000 -> 0x0fffff -- Firmware.
305                  *
306                  * ISP25xx type boards:
307                  *
308                  *      0x000000 -> 0x07ffff -- Boot code.
309                  *      0x080000 -> 0x0fffff -- Firmware.
310                  *      0x120000 -> 0x12ffff -- VPD and HBA parameters.
311                  */
312                 valid = 0;
313                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
314                         valid = 1;
315                 else if (start == (ha->flt_region_boot * 4) ||
316                     start == (ha->flt_region_fw * 4))
317                         valid = 1;
318                 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
319                         valid = 1;
320                 if (!valid) {
321                         qla_printk(KERN_WARNING, ha,
322                             "Invalid start region 0x%x/0x%x.\n", start, size);
323                         return -EINVAL;
324                 }
325
326                 ha->optrom_region_start = start;
327                 ha->optrom_region_size = start + size > ha->optrom_size ?
328                     ha->optrom_size - start : size;
329
330                 ha->optrom_state = QLA_SWRITING;
331                 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
332                 if (ha->optrom_buffer == NULL) {
333                         qla_printk(KERN_WARNING, ha,
334                             "Unable to allocate memory for optrom update "
335                             "(%x).\n", ha->optrom_region_size);
336
337                         ha->optrom_state = QLA_SWAITING;
338                         return count;
339                 }
340
341                 DEBUG2(qla_printk(KERN_INFO, ha,
342                     "Staging flash region write -- 0x%x/0x%x.\n",
343                     ha->optrom_region_start, ha->optrom_region_size));
344
345                 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
346                 break;
347         case 3:
348                 if (ha->optrom_state != QLA_SWRITING)
349                         break;
350
351                 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
352                         qla_printk(KERN_WARNING, ha,
353                             "HBA not online, failing flash update.\n");
354                         return -EAGAIN;
355                 }
356
357                 DEBUG2(qla_printk(KERN_INFO, ha,
358                     "Writing flash region -- 0x%x/0x%x.\n",
359                     ha->optrom_region_start, ha->optrom_region_size));
360
361                 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
362                     ha->optrom_region_start, ha->optrom_region_size);
363                 break;
364         default:
365                 count = -EINVAL;
366         }
367         return count;
368 }
369
370 static struct bin_attribute sysfs_optrom_ctl_attr = {
371         .attr = {
372                 .name = "optrom_ctl",
373                 .mode = S_IWUSR,
374         },
375         .size = 0,
376         .write = qla2x00_sysfs_write_optrom_ctl,
377 };
378
379 static ssize_t
380 qla2x00_sysfs_read_vpd(struct kobject *kobj,
381                        struct bin_attribute *bin_attr,
382                        char *buf, loff_t off, size_t count)
383 {
384         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
385             struct device, kobj)));
386         struct qla_hw_data *ha = vha->hw;
387
388         if (unlikely(pci_channel_offline(ha->pdev)))
389                 return 0;
390
391         if (!capable(CAP_SYS_ADMIN))
392                 return 0;
393
394         if (IS_NOCACHE_VPD_TYPE(ha))
395                 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
396                     ha->vpd_size);
397         return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
398 }
399
400 static ssize_t
401 qla2x00_sysfs_write_vpd(struct kobject *kobj,
402                         struct bin_attribute *bin_attr,
403                         char *buf, loff_t off, size_t count)
404 {
405         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
406             struct device, kobj)));
407         struct qla_hw_data *ha = vha->hw;
408         uint8_t *tmp_data;
409
410         if (unlikely(pci_channel_offline(ha->pdev)))
411                 return 0;
412
413         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
414             !ha->isp_ops->write_nvram)
415                 return 0;
416
417         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
418                 qla_printk(KERN_WARNING, ha,
419                     "HBA not online, failing VPD update.\n");
420                 return -EAGAIN;
421         }
422
423         /* Write NVRAM. */
424         ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
425         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
426
427         /* Update flash version information for 4Gb & above. */
428         if (!IS_FWI2_CAPABLE(ha))
429                 goto done;
430
431         tmp_data = vmalloc(256);
432         if (!tmp_data) {
433                 qla_printk(KERN_WARNING, ha,
434                     "Unable to allocate memory for VPD information update.\n");
435                 goto done;
436         }
437         ha->isp_ops->get_flash_version(vha, tmp_data);
438         vfree(tmp_data);
439 done:
440         return count;
441 }
442
443 static struct bin_attribute sysfs_vpd_attr = {
444         .attr = {
445                 .name = "vpd",
446                 .mode = S_IRUSR | S_IWUSR,
447         },
448         .size = 0,
449         .read = qla2x00_sysfs_read_vpd,
450         .write = qla2x00_sysfs_write_vpd,
451 };
452
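/*
 * 'sfp' is a read-only attribute exposing the transceiver contents: the
 * two-wire devices at addresses 0xa0 (serial ID/identification page) and
 * 0xa2 (diagnostics page, per SFF-8472) are fetched from the firmware in
 * SFP_BLOCK_SIZE chunks via qla2x00_read_sfp().
 */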
453 static ssize_t
454 qla2x00_sysfs_read_sfp(struct kobject *kobj,
455                        struct bin_attribute *bin_attr,
456                        char *buf, loff_t off, size_t count)
457 {
458         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
459             struct device, kobj)));
460         struct qla_hw_data *ha = vha->hw;
461         uint16_t iter, addr, offset;
462         int rval;
463
464         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
465                 return 0;
466
467         if (ha->sfp_data)
468                 goto do_read;
469
470         ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
471             &ha->sfp_data_dma);
472         if (!ha->sfp_data) {
473                 qla_printk(KERN_WARNING, ha,
474                     "Unable to allocate memory for SFP read-data.\n");
475                 return 0;
476         }
477
478 do_read:
479         memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
480         addr = 0xa0;
481         for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
482             iter++, offset += SFP_BLOCK_SIZE) {
483                 if (iter == 4) {
484                         /* Skip to next device address. */
485                         addr = 0xa2;
486                         offset = 0;
487                 }
488
489                 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
490                     SFP_BLOCK_SIZE);
491                 if (rval != QLA_SUCCESS) {
492                         qla_printk(KERN_WARNING, ha,
493                             "Unable to read SFP data (%x/%x/%x).\n", rval,
494                             addr, offset);
495                         count = 0;
496                         break;
497                 }
498                 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
499                 buf += SFP_BLOCK_SIZE;
500         }
501
502         return count;
503 }
504
505 static struct bin_attribute sysfs_sfp_attr = {
506         .attr = {
507                 .name = "sfp",
508                 .mode = S_IRUSR | S_IWUSR,
509         },
510         .size = SFP_DEV_SIZE * 2,
511         .read = qla2x00_sysfs_read_sfp,
512 };
513
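/*
 * Magic codes accepted by the write-only 'reset' attribute:
 *   0x2025c - full ISP reset: SCSI requests are blocked, an ISP abort is
 *             scheduled through the DPC thread and the chip reset is waited
 *             out before requests are unblocked
 *   0x2025d - MPI firmware restart (ISP81xx only)
 */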
514 static ssize_t
515 qla2x00_sysfs_write_reset(struct kobject *kobj,
516                         struct bin_attribute *bin_attr,
517                         char *buf, loff_t off, size_t count)
518 {
519         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
520             struct device, kobj)));
521         struct qla_hw_data *ha = vha->hw;
522         int type;
523
524         if (off != 0)
525                 return 0;
526
527         type = simple_strtol(buf, NULL, 10);
528         switch (type) {
529         case 0x2025c:
530                 qla_printk(KERN_INFO, ha,
531                     "Issuing ISP reset on (%ld).\n", vha->host_no);
532
533                 scsi_block_requests(vha->host);
534                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
535                 qla2xxx_wake_dpc(vha);
536                 qla2x00_wait_for_chip_reset(vha);
537                 scsi_unblock_requests(vha->host);
538                 break;
539         case 0x2025d:
540                 if (!IS_QLA81XX(ha))
541                         break;
542
543                 qla_printk(KERN_INFO, ha,
544                     "Issuing MPI reset on (%ld).\n", vha->host_no);
545
546                 /* Make sure FC side is not in reset */
547                 qla2x00_wait_for_hba_online(vha);
548
549                 /* Issue MPI reset */
550                 scsi_block_requests(vha->host);
551                 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
552                         qla_printk(KERN_WARNING, ha,
553                             "MPI reset failed on (%ld).\n", vha->host_no);
554                 scsi_unblock_requests(vha->host);
555                 break;
556         }
557         return count;
558 }
559
560 static struct bin_attribute sysfs_reset_attr = {
561         .attr = {
562                 .name = "reset",
563                 .mode = S_IWUSR,
564         },
565         .size = 0,
566         .write = qla2x00_sysfs_write_reset,
567 };
568
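/*
 * 'edc' issues an EDC write through the firmware, 'edc_status' the
 * corresponding read.  Both writes start with an 8-byte little-endian
 * header -- dev(u16), adr(u16), opt(u16), len(u16) -- and 'edc' carries
 * 'len' bytes of payload after the header; the data gathered by an
 * 'edc_status' write is then read back through the same attribute.
 */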
569 static ssize_t
570 qla2x00_sysfs_write_edc(struct kobject *kobj,
571                         struct bin_attribute *bin_attr,
572                         char *buf, loff_t off, size_t count)
573 {
574         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
575             struct device, kobj)));
576         struct qla_hw_data *ha = vha->hw;
577         uint16_t dev, adr, opt, len;
578         int rval;
579
580         ha->edc_data_len = 0;
581
582         if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
583                 return 0;
584
585         if (!ha->edc_data) {
586                 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
587                     &ha->edc_data_dma);
588                 if (!ha->edc_data) {
589                         DEBUG2(qla_printk(KERN_INFO, ha,
590                             "Unable to allocate memory for EDC write.\n"));
591                         return 0;
592                 }
593         }
594
595         dev = le16_to_cpup((void *)&buf[0]);
596         adr = le16_to_cpup((void *)&buf[2]);
597         opt = le16_to_cpup((void *)&buf[4]);
598         len = le16_to_cpup((void *)&buf[6]);
599
600         if (!(opt & BIT_0))
601                 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
602                         return -EINVAL;
603
604         memcpy(ha->edc_data, &buf[8], len);
605
606         rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
607             ha->edc_data, len, opt);
608         if (rval != QLA_SUCCESS) {
609                 DEBUG2(qla_printk(KERN_INFO, ha,
610                     "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
611                     rval, dev, adr, opt, len, *buf));
612                 return 0;
613         }
614
615         return count;
616 }
617
618 static struct bin_attribute sysfs_edc_attr = {
619         .attr = {
620                 .name = "edc",
621                 .mode = S_IWUSR,
622         },
623         .size = 0,
624         .write = qla2x00_sysfs_write_edc,
625 };
626
627 static ssize_t
628 qla2x00_sysfs_write_edc_status(struct kobject *kobj,
629                         struct bin_attribute *bin_attr,
630                         char *buf, loff_t off, size_t count)
631 {
632         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
633             struct device, kobj)));
634         struct qla_hw_data *ha = vha->hw;
635         uint16_t dev, adr, opt, len;
636         int rval;
637
638         ha->edc_data_len = 0;
639
640         if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
641                 return 0;
642
643         if (!ha->edc_data) {
644                 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
645                     &ha->edc_data_dma);
646                 if (!ha->edc_data) {
647                         DEBUG2(qla_printk(KERN_INFO, ha,
648                             "Unable to allocate memory for EDC status.\n"));
649                         return 0;
650                 }
651         }
652
653         dev = le16_to_cpup((void *)&buf[0]);
654         adr = le16_to_cpup((void *)&buf[2]);
655         opt = le16_to_cpup((void *)&buf[4]);
656         len = le16_to_cpup((void *)&buf[6]);
657
658         if (!(opt & BIT_0))
659                 if (len == 0 || len > DMA_POOL_SIZE)
660                         return -EINVAL;
661
662         memset(ha->edc_data, 0, len);
663         rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
664             ha->edc_data, len, opt);
665         if (rval != QLA_SUCCESS) {
666                 DEBUG2(qla_printk(KERN_INFO, ha,
667                     "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
668                     rval, dev, adr, opt, len));
669                 return 0;
670         }
671
672         ha->edc_data_len = len;
673
674         return count;
675 }
676
677 static ssize_t
678 qla2x00_sysfs_read_edc_status(struct kobject *kobj,
679                            struct bin_attribute *bin_attr,
680                            char *buf, loff_t off, size_t count)
681 {
682         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
683             struct device, kobj)));
684         struct qla_hw_data *ha = vha->hw;
685
686         if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
687                 return 0;
688
689         if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
690                 return -EINVAL;
691
692         memcpy(buf, ha->edc_data, ha->edc_data_len);
693
694         return ha->edc_data_len;
695 }
696
697 static struct bin_attribute sysfs_edc_status_attr = {
698         .attr = {
699                 .name = "edc_status",
700                 .mode = S_IRUSR | S_IWUSR,
701         },
702         .size = 0,
703         .write = qla2x00_sysfs_write_edc_status,
704         .read = qla2x00_sysfs_read_edc_status,
705 };
706
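/*
 * 'xgmac_stats' and 'dcbx_tlv' are FCoE-function (ISP81xx) attributes.  Each
 * lazily allocates a coherent DMA buffer on first access and returns the raw
 * data reported by the firmware: 10GbE MAC statistics and the DCBX TLV
 * parameters, respectively.
 */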
707 static ssize_t
708 qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
709                        struct bin_attribute *bin_attr,
710                        char *buf, loff_t off, size_t count)
711 {
712         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
713             struct device, kobj)));
714         struct qla_hw_data *ha = vha->hw;
715         int rval;
716         uint16_t actual_size;
717
718         if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
719                 return 0;
720
721         if (ha->xgmac_data)
722                 goto do_read;
723
724         ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
725             &ha->xgmac_data_dma, GFP_KERNEL);
726         if (!ha->xgmac_data) {
727                 qla_printk(KERN_WARNING, ha,
728                     "Unable to allocate memory for XGMAC read-data.\n");
729                 return 0;
730         }
731
732 do_read:
733         actual_size = 0;
734         memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
735
736         rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
737             XGMAC_DATA_SIZE, &actual_size);
738         if (rval != QLA_SUCCESS) {
739                 qla_printk(KERN_WARNING, ha,
740                     "Unable to read XGMAC data (%x).\n", rval);
741                 count = 0;
742         }
743
744                 count = actual_size > count ? count : actual_size;
745         memcpy(buf, ha->xgmac_data, count);
746
747         return count;
748 }
749
750 static struct bin_attribute sysfs_xgmac_stats_attr = {
751         .attr = {
752                 .name = "xgmac_stats",
753                 .mode = S_IRUSR,
754         },
755         .size = 0,
756         .read = qla2x00_sysfs_read_xgmac_stats,
757 };
758
759 static ssize_t
760 qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
761                        struct bin_attribute *bin_attr,
762                        char *buf, loff_t off, size_t count)
763 {
764         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
765             struct device, kobj)));
766         struct qla_hw_data *ha = vha->hw;
767         int rval;
768         uint16_t actual_size;
769
770         if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
771                 return 0;
772
773         if (ha->dcbx_tlv)
774                 goto do_read;
775
776         ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
777             &ha->dcbx_tlv_dma, GFP_KERNEL);
778         if (!ha->dcbx_tlv) {
779                 qla_printk(KERN_WARNING, ha,
780                     "Unable to allocate memory for DCBX TLV read-data.\n");
781                 return 0;
782         }
783
784 do_read:
785         actual_size = 0;
786         memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
787
788         rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
789             DCBX_TLV_DATA_SIZE);
790         if (rval != QLA_SUCCESS) {
791                 qla_printk(KERN_WARNING, ha,
792                     "Unable to read DCBX TLV data (%x).\n", rval);
793                 count = 0;
794         }
795
796         memcpy(buf, ha->dcbx_tlv, count);
797
798         return count;
799 }
800
801 static struct bin_attribute sysfs_dcbx_tlv_attr = {
802         .attr = {
803                 .name = "dcbx_tlv",
804                 .mode = S_IRUSR,
805         },
806         .size = 0,
807         .read = qla2x00_sysfs_read_dcbx_tlv,
808 };
809
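/*
 * 'is4GBp_only' gates attribute creation by ISP family (see
 * qla2x00_alloc_sysfs_attr() below):
 *   0 - created for every ISP
 *   1 - FWI2-capable (4Gb and newer) ISPs only
 *   2 - ISP25xx only
 *   3 - ISP81xx only
 */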
810 static struct sysfs_entry {
811         char *name;
812         struct bin_attribute *attr;
813         int is4GBp_only;
814 } bin_file_entries[] = {
815         { "fw_dump", &sysfs_fw_dump_attr, },
816         { "nvram", &sysfs_nvram_attr, },
817         { "optrom", &sysfs_optrom_attr, },
818         { "optrom_ctl", &sysfs_optrom_ctl_attr, },
819         { "vpd", &sysfs_vpd_attr, 1 },
820         { "sfp", &sysfs_sfp_attr, 1 },
821         { "reset", &sysfs_reset_attr, },
822         { "edc", &sysfs_edc_attr, 2 },
823         { "edc_status", &sysfs_edc_status_attr, 2 },
824         { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
825         { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
826         { NULL },
827 };
828
829 void
830 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
831 {
832         struct Scsi_Host *host = vha->host;
833         struct sysfs_entry *iter;
834         int ret;
835
836         for (iter = bin_file_entries; iter->name; iter++) {
837                 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
838                         continue;
839                 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
840                         continue;
841                 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
842                         continue;
843
844                 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
845                     iter->attr);
846                 if (ret)
847                         qla_printk(KERN_INFO, vha->hw,
848                             "Unable to create sysfs %s binary attribute "
849                             "(%d).\n", iter->name, ret);
850         }
851 }
852
853 void
854 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
855 {
856         struct Scsi_Host *host = vha->host;
857         struct sysfs_entry *iter;
858         struct qla_hw_data *ha = vha->hw;
859
860         for (iter = bin_file_entries; iter->name; iter++) {
861                 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
862                         continue;
863                 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
864                         continue;
865                 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
866                         continue;
867
868                 sysfs_remove_bin_file(&host->shost_gendev.kobj,
869                     iter->attr);
870         }
871
872         if (ha->beacon_blink_led == 1)
873                 ha->isp_ops->beacon_off(vha);
874 }
875
876 /* Scsi_Host attributes. */
877
878 static ssize_t
879 qla2x00_drvr_version_show(struct device *dev,
880                           struct device_attribute *attr, char *buf)
881 {
882         return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
883 }
884
885 static ssize_t
886 qla2x00_fw_version_show(struct device *dev,
887                         struct device_attribute *attr, char *buf)
888 {
889         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
890         struct qla_hw_data *ha = vha->hw;
891         char fw_str[128];
892
893         return snprintf(buf, PAGE_SIZE, "%s\n",
894             ha->isp_ops->fw_version_str(vha, fw_str));
895 }
896
897 static ssize_t
898 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
899                         char *buf)
900 {
901         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
902         struct qla_hw_data *ha = vha->hw;
903         uint32_t sn;
904
905         if (IS_FWI2_CAPABLE(ha)) {
906                 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
907                 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
908         }
909
910         sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
911         return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
912             sn % 100000);
913 }
914
915 static ssize_t
916 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
917                       char *buf)
918 {
919         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
920         return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
921 }
922
923 static ssize_t
924 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
925                     char *buf)
926 {
927         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
928         struct qla_hw_data *ha = vha->hw;
929         return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
930             ha->product_id[0], ha->product_id[1], ha->product_id[2],
931             ha->product_id[3]);
932 }
933
934 static ssize_t
935 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
936                         char *buf)
937 {
938         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
939         return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
940 }
941
942 static ssize_t
943 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
944                         char *buf)
945 {
946         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
947         return snprintf(buf, PAGE_SIZE, "%s\n",
948             vha->hw->model_desc ? vha->hw->model_desc : "");
949 }
950
951 static ssize_t
952 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
953                       char *buf)
954 {
955         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
956         char pci_info[30];
957
958         return snprintf(buf, PAGE_SIZE, "%s\n",
959             vha->hw->isp_ops->pci_info_str(vha, pci_info));
960 }
961
962 static ssize_t
963 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
964                         char *buf)
965 {
966         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
967         struct qla_hw_data *ha = vha->hw;
968         int len = 0;
969
970         if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
971             atomic_read(&vha->loop_state) == LOOP_DEAD)
972                 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
973         else if (atomic_read(&vha->loop_state) != LOOP_READY ||
974             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
975             test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
976                 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
977         else {
978                 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
979
980                 switch (ha->current_topology) {
981                 case ISP_CFG_NL:
982                         len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
983                         break;
984                 case ISP_CFG_FL:
985                         len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
986                         break;
987                 case ISP_CFG_N:
988                         len += snprintf(buf + len, PAGE_SIZE-len,
989                             "N_Port to N_Port\n");
990                         break;
991                 case ISP_CFG_F:
992                         len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
993                         break;
994                 default:
995                         len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
996                         break;
997                 }
998         }
999         return len;
1000 }
1001
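/*
 * 'zio' toggles the firmware's ZIO (zero interrupt operation) mode -- mode 6
 * when enabled, disabled otherwise -- and queues an ISP abort (unless ZIO
 * stays disabled) so the new setting takes effect.  'zio_timer' sets the
 * interrupt delay: values are written in microseconds (100-25500) and stored
 * in 100-microsecond increments.
 */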
1002 static ssize_t
1003 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1004                  char *buf)
1005 {
1006         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1007         int len = 0;
1008
1009         switch (vha->hw->zio_mode) {
1010         case QLA_ZIO_MODE_6:
1011                 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1012                 break;
1013         case QLA_ZIO_DISABLED:
1014                 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1015                 break;
1016         }
1017         return len;
1018 }
1019
1020 static ssize_t
1021 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1022                   const char *buf, size_t count)
1023 {
1024         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1025         struct qla_hw_data *ha = vha->hw;
1026         int val = 0;
1027         uint16_t zio_mode;
1028
1029         if (!IS_ZIO_SUPPORTED(ha))
1030                 return -ENOTSUPP;
1031
1032         if (sscanf(buf, "%d", &val) != 1)
1033                 return -EINVAL;
1034
1035         if (val)
1036                 zio_mode = QLA_ZIO_MODE_6;
1037         else
1038                 zio_mode = QLA_ZIO_DISABLED;
1039
1040         /* Update per-hba values and queue a reset. */
1041         if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1042                 ha->zio_mode = zio_mode;
1043                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1044         }
1045         return strlen(buf);
1046 }
1047
1048 static ssize_t
1049 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1050                        char *buf)
1051 {
1052         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1053
1054         return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1055 }
1056
1057 static ssize_t
1058 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1059                         const char *buf, size_t count)
1060 {
1061         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1062         int val = 0;
1063         uint16_t zio_timer;
1064
1065         if (sscanf(buf, "%d", &val) != 1)
1066                 return -EINVAL;
1067         if (val > 25500 || val < 100)
1068                 return -ERANGE;
1069
1070         zio_timer = (uint16_t)(val / 100);
1071         vha->hw->zio_timer = zio_timer;
1072
1073         return strlen(buf);
1074 }
1075
1076 static ssize_t
1077 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1078                     char *buf)
1079 {
1080         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1081         int len = 0;
1082
1083         if (vha->hw->beacon_blink_led)
1084                 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1085         else
1086                 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1087         return len;
1088 }
1089
1090 static ssize_t
1091 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1092                      const char *buf, size_t count)
1093 {
1094         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1095         struct qla_hw_data *ha = vha->hw;
1096         int val = 0;
1097         int rval;
1098
1099         if (IS_QLA2100(ha) || IS_QLA2200(ha))
1100                 return -EPERM;
1101
1102         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1103                 qla_printk(KERN_WARNING, ha,
1104                     "Abort ISP active -- ignoring beacon request.\n");
1105                 return -EBUSY;
1106         }
1107
1108         if (sscanf(buf, "%d", &val) != 1)
1109                 return -EINVAL;
1110
1111         if (val)
1112                 rval = ha->isp_ops->beacon_on(vha);
1113         else
1114                 rval = ha->isp_ops->beacon_off(vha);
1115
1116         if (rval != QLA_SUCCESS)
1117                 count = 0;
1118
1119         return count;
1120 }
1121
1122 static ssize_t
1123 qla2x00_optrom_bios_version_show(struct device *dev,
1124                                  struct device_attribute *attr, char *buf)
1125 {
1126         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1127         struct qla_hw_data *ha = vha->hw;
1128         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1129             ha->bios_revision[0]);
1130 }
1131
1132 static ssize_t
1133 qla2x00_optrom_efi_version_show(struct device *dev,
1134                                 struct device_attribute *attr, char *buf)
1135 {
1136         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1137         struct qla_hw_data *ha = vha->hw;
1138         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1139             ha->efi_revision[0]);
1140 }
1141
1142 static ssize_t
1143 qla2x00_optrom_fcode_version_show(struct device *dev,
1144                                   struct device_attribute *attr, char *buf)
1145 {
1146         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1147         struct qla_hw_data *ha = vha->hw;
1148         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1149             ha->fcode_revision[0]);
1150 }
1151
1152 static ssize_t
1153 qla2x00_optrom_fw_version_show(struct device *dev,
1154                                struct device_attribute *attr, char *buf)
1155 {
1156         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1157         struct qla_hw_data *ha = vha->hw;
1158         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1159             ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1160             ha->fw_revision[3]);
1161 }
1162
1163 static ssize_t
1164 qla2x00_total_isp_aborts_show(struct device *dev,
1165                               struct device_attribute *attr, char *buf)
1166 {
1167         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1168         struct qla_hw_data *ha = vha->hw;
1169         return snprintf(buf, PAGE_SIZE, "%d\n",
1170             ha->qla_stats.total_isp_aborts);
1171 }
1172
1173 static ssize_t
1174 qla24xx_84xx_fw_version_show(struct device *dev,
1175         struct device_attribute *attr, char *buf)
1176 {
1177         int rval = QLA_SUCCESS;
1178         uint16_t status[2] = {0, 0};
1179         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1180         struct qla_hw_data *ha = vha->hw;
1181
1182         if (IS_QLA84XX(ha) && ha->cs84xx) {
1183                 if (ha->cs84xx->op_fw_version == 0) {
1184                         rval = qla84xx_verify_chip(vha, status);
1185                 }
1186
1187                 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1188                         return snprintf(buf, PAGE_SIZE, "%u\n",
1189                             (uint32_t)ha->cs84xx->op_fw_version);
1190         }
1191
1192         return snprintf(buf, PAGE_SIZE, "\n");
1193 }
1194
1195 static ssize_t
1196 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1197     char *buf)
1198 {
1199         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1200         struct qla_hw_data *ha = vha->hw;
1201
1202         if (!IS_QLA81XX(ha))
1203                 return snprintf(buf, PAGE_SIZE, "\n");
1204
1205         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1206             ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1207             ha->mpi_capabilities);
1208 }
1209
1210 static ssize_t
1211 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1212     char *buf)
1213 {
1214         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1215         struct qla_hw_data *ha = vha->hw;
1216
1217         if (!IS_QLA81XX(ha))
1218                 return snprintf(buf, PAGE_SIZE, "\n");
1219
1220         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1221             ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1222 }
1223
1224 static ssize_t
1225 qla2x00_flash_block_size_show(struct device *dev,
1226                               struct device_attribute *attr, char *buf)
1227 {
1228         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1229         struct qla_hw_data *ha = vha->hw;
1230
1231         return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1232 }
1233
1234 static ssize_t
1235 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1236     char *buf)
1237 {
1238         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1239
1240         if (!IS_QLA81XX(vha->hw))
1241                 return snprintf(buf, PAGE_SIZE, "\n");
1242
1243         return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1244 }
1245
1246 static ssize_t
1247 qla2x00_vn_port_mac_address_show(struct device *dev,
1248     struct device_attribute *attr, char *buf)
1249 {
1250         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1251
1252         if (!IS_QLA81XX(vha->hw))
1253                 return snprintf(buf, PAGE_SIZE, "\n");
1254
1255         return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1256             vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1257             vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1258             vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1259 }
1260
1261 static ssize_t
1262 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1263     char *buf)
1264 {
1265         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1266
1267         return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1268 }
1269
1270 static ssize_t
1271 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1272     char *buf)
1273 {
1274         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1275         int rval = QLA_FUNCTION_FAILED;
1276         uint16_t state[5];
1277
1278         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1279                 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1280                 DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
1281                         __func__, vha->host_no));
1282         else if (!vha->hw->flags.eeh_busy)
1283                 rval = qla2x00_get_firmware_state(vha, state);
1284         if (rval != QLA_SUCCESS)
1285                 memset(state, -1, sizeof(state));
1286
1287         return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1288             state[1], state[2], state[3], state[4]);
1289 }
1290
1291 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1292 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1293 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1294 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1295 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1296 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1297 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1298 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1299 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1300 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1301 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1302                    qla2x00_zio_timer_store);
1303 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1304                    qla2x00_beacon_store);
1305 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1306                    qla2x00_optrom_bios_version_show, NULL);
1307 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1308                    qla2x00_optrom_efi_version_show, NULL);
1309 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1310                    qla2x00_optrom_fcode_version_show, NULL);
1311 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1312                    NULL);
1313 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1314                    NULL);
1315 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1316                    NULL);
1317 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1318 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1319 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1320                    NULL);
1321 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1322 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1323                    qla2x00_vn_port_mac_address_show, NULL);
1324 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1325 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1326
1327 struct device_attribute *qla2x00_host_attrs[] = {
1328         &dev_attr_driver_version,
1329         &dev_attr_fw_version,
1330         &dev_attr_serial_num,
1331         &dev_attr_isp_name,
1332         &dev_attr_isp_id,
1333         &dev_attr_model_name,
1334         &dev_attr_model_desc,
1335         &dev_attr_pci_info,
1336         &dev_attr_link_state,
1337         &dev_attr_zio,
1338         &dev_attr_zio_timer,
1339         &dev_attr_beacon,
1340         &dev_attr_optrom_bios_version,
1341         &dev_attr_optrom_efi_version,
1342         &dev_attr_optrom_fcode_version,
1343         &dev_attr_optrom_fw_version,
1344         &dev_attr_84xx_fw_version,
1345         &dev_attr_total_isp_aborts,
1346         &dev_attr_mpi_version,
1347         &dev_attr_phy_version,
1348         &dev_attr_flash_block_size,
1349         &dev_attr_vlan_id,
1350         &dev_attr_vn_port_mac_address,
1351         &dev_attr_fabric_param,
1352         &dev_attr_fw_state,
1353         NULL,
1354 };
1355
1356 /* Host attributes. */
1357
1358 static void
1359 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1360 {
1361         scsi_qla_host_t *vha = shost_priv(shost);
1362
1363         fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1364             vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1365 }
1366
1367 static void
1368 qla2x00_get_host_speed(struct Scsi_Host *shost)
1369 {
1370         struct qla_hw_data *ha = ((struct scsi_qla_host *)
1371                                         (shost_priv(shost)))->hw;
1372         u32 speed = FC_PORTSPEED_UNKNOWN;
1373
1374         switch (ha->link_data_rate) {
1375         case PORT_SPEED_1GB:
1376                 speed = FC_PORTSPEED_1GBIT;
1377                 break;
1378         case PORT_SPEED_2GB:
1379                 speed = FC_PORTSPEED_2GBIT;
1380                 break;
1381         case PORT_SPEED_4GB:
1382                 speed = FC_PORTSPEED_4GBIT;
1383                 break;
1384         case PORT_SPEED_8GB:
1385                 speed = FC_PORTSPEED_8GBIT;
1386                 break;
1387         case PORT_SPEED_10GB:
1388                 speed = FC_PORTSPEED_10GBIT;
1389                 break;
1390         }
1391         fc_host_speed(shost) = speed;
1392 }
1393
1394 static void
1395 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1396 {
1397         scsi_qla_host_t *vha = shost_priv(shost);
1398         uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1399
1400         if (vha->vp_idx) {
1401                 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1402                 return;
1403         }
1404         switch (vha->hw->current_topology) {
1405         case ISP_CFG_NL:
1406                 port_type = FC_PORTTYPE_LPORT;
1407                 break;
1408         case ISP_CFG_FL:
1409                 port_type = FC_PORTTYPE_NLPORT;
1410                 break;
1411         case ISP_CFG_N:
1412                 port_type = FC_PORTTYPE_PTP;
1413                 break;
1414         case ISP_CFG_F:
1415                 port_type = FC_PORTTYPE_NPORT;
1416                 break;
1417         }
1418         fc_host_port_type(shost) = port_type;
1419 }
1420
1421 static void
1422 qla2x00_get_starget_node_name(struct scsi_target *starget)
1423 {
1424         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1425         scsi_qla_host_t *vha = shost_priv(host);
1426         fc_port_t *fcport;
1427         u64 node_name = 0;
1428
1429         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1430                 if (fcport->rport &&
1431                     starget->id == fcport->rport->scsi_target_id) {
1432                         node_name = wwn_to_u64(fcport->node_name);
1433                         break;
1434                 }
1435         }
1436
1437         fc_starget_node_name(starget) = node_name;
1438 }
1439
1440 static void
1441 qla2x00_get_starget_port_name(struct scsi_target *starget)
1442 {
1443         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1444         scsi_qla_host_t *vha = shost_priv(host);
1445         fc_port_t *fcport;
1446         u64 port_name = 0;
1447
1448         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1449                 if (fcport->rport &&
1450                     starget->id == fcport->rport->scsi_target_id) {
1451                         port_name = wwn_to_u64(fcport->port_name);
1452                         break;
1453                 }
1454         }
1455
1456         fc_starget_port_name(starget) = port_name;
1457 }
1458
1459 static void
1460 qla2x00_get_starget_port_id(struct scsi_target *starget)
1461 {
1462         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1463         scsi_qla_host_t *vha = shost_priv(host);
1464         fc_port_t *fcport;
1465         uint32_t port_id = ~0U;
1466
1467         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1468                 if (fcport->rport &&
1469                     starget->id == fcport->rport->scsi_target_id) {
1470                         port_id = fcport->d_id.b.domain << 16 |
1471                             fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1472                         break;
1473                 }
1474         }
1475
1476         fc_starget_port_id(starget) = port_id;
1477 }
1478
1479 static void
1480 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1481 {
1482         if (timeout)
1483                 rport->dev_loss_tmo = timeout;
1484         else
1485                 rport->dev_loss_tmo = 1;
1486 }
1487
1488 static void
1489 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1490 {
1491         struct Scsi_Host *host = rport_to_shost(rport);
1492         fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1493
1494         if (!fcport)
1495                 return;
1496
1497         if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1498                 return;
1499
1500         if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1501                 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1502                 return;
1503         }
1504
1505         /*
1506          * Transport has effectively 'deleted' the rport, clear
1507          * all local references.
1508          */
1509         spin_lock_irq(host->host_lock);
1510         fcport->rport = NULL;
1511         *((fc_port_t **)rport->dd_data) = NULL;
1512         spin_unlock_irq(host->host_lock);
1513 }
1514
1515 static void
1516 qla2x00_terminate_rport_io(struct fc_rport *rport)
1517 {
1518         fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1519
1520         if (!fcport)
1521                 return;
1522
1523         if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1524                 return;
1525
1526         if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1527                 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1528                 return;
1529         }
1530         /*
1531          * At this point all fcport's software-states are cleared.  Perform any
1532          * final cleanup of firmware resources (PCBs and XCBs).
1533          */
1534         if (fcport->loop_id != FC_NO_LOOP_ID &&
1535             !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1536                 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1537                         fcport->loop_id, fcport->d_id.b.domain,
1538                         fcport->d_id.b.area, fcport->d_id.b.al_pa);
1539 }
1540
1541 static int
1542 qla2x00_issue_lip(struct Scsi_Host *shost)
1543 {
1544         scsi_qla_host_t *vha = shost_priv(shost);
1545
1546         qla2x00_loop_reset(vha);
1547         return 0;
1548 }
1549
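/*
 * fc_host statistics callback.  A statistics buffer is taken from the
 * hardware DMA pool; FWI2-capable ISPs fill it via the 24xx statistics
 * mailbox command, while older ISPs use Get Link Status and require the
 * loop to be READY with no ISP abort pending.  If anything fails, the
 * buffer that was pre-filled with -1 is returned unchanged.
 */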
1550 static struct fc_host_statistics *
1551 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1552 {
1553         scsi_qla_host_t *vha = shost_priv(shost);
1554         struct qla_hw_data *ha = vha->hw;
1555         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1556         int rval;
1557         struct link_statistics *stats;
1558         dma_addr_t stats_dma;
1559         struct fc_host_statistics *pfc_host_stat;
1560
1561         pfc_host_stat = &ha->fc_host_stat;
1562         memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1563
1564         if (test_bit(UNLOADING, &vha->dpc_flags))
1565                 goto done;
1566
1567         if (unlikely(pci_channel_offline(ha->pdev)))
1568                 goto done;
1569
1570         stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1571         if (stats == NULL) {
1572                 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1573                     __func__, base_vha->host_no));
1574                 goto done;
1575         }
1576         memset(stats, 0, DMA_POOL_SIZE);
1577
1578         rval = QLA_FUNCTION_FAILED;
1579         if (IS_FWI2_CAPABLE(ha)) {
1580                 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1581         } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1582                     !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1583                     !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1584                     !ha->dpc_active) {
1585                 /* Must be in a 'READY' state for statistics retrieval. */
1586                 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1587                                                 stats, stats_dma);
1588         }
1589
1590         if (rval != QLA_SUCCESS)
1591                 goto done_free;
1592
1593         pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1594         pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1595         pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1596         pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1597         pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1598         pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1599         if (IS_FWI2_CAPABLE(ha)) {
1600                 pfc_host_stat->lip_count = stats->lip_cnt;
1601                 pfc_host_stat->tx_frames = stats->tx_frames;
1602                 pfc_host_stat->rx_frames = stats->rx_frames;
1603                 pfc_host_stat->dumped_frames = stats->dumped_frames;
1604                 pfc_host_stat->nos_count = stats->nos_rcvd;
1605         }
1606         pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1607         pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1608
1609 done_free:
1610         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1611 done:
1612         return pfc_host_stat;
1613 }
1614
1615 static void
1616 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1617 {
1618         scsi_qla_host_t *vha = shost_priv(shost);
1619
1620         qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1621 }
1622
1623 static void
1624 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1625 {
1626         scsi_qla_host_t *vha = shost_priv(shost);
1627
1628         set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1629 }
1630
1631 static void
1632 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1633 {
1634         scsi_qla_host_t *vha = shost_priv(shost);
1635         u64 node_name;
1636
1637         if (vha->device_flags & SWITCH_FOUND)
1638                 node_name = wwn_to_u64(vha->fabric_node_name);
1639         else
1640                 node_name = wwn_to_u64(vha->node_name);
1641
1642         fc_host_fabric_name(shost) = node_name;
1643 }
1644
1645 static void
1646 qla2x00_get_host_port_state(struct Scsi_Host *shost)
1647 {
1648         scsi_qla_host_t *vha = shost_priv(shost);
1649         struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1650
1651         if (!base_vha->flags.online)
1652                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1653         else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1654                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1655         else
1656                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1657 }
1658
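/*
 * NPIV vport creation.  Once the transport's sanity checks pass, a new
 * scsi_qla_host is allocated for the vport, its loop state is marked DOWN
 * (or DEAD when the physical port is down), the SCSI host is registered and
 * the fc_host attributes are inherited from the base port.  The vport is
 * then bound to a request queue: queue 1 when CPU affinity is enabled,
 * otherwise a dedicated QoS queue if the vport's WWPN/WWNN match an entry
 * in the NVRAM NPIV table, falling back to the default queue.
 */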
1659 static int
1660 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1661 {
1662         int     ret = 0;
1663         uint8_t qos = 0;
1664         scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1665         scsi_qla_host_t *vha = NULL;
1666         struct qla_hw_data *ha = base_vha->hw;
1667         uint16_t options = 0;
1668         int     cnt;
1669         struct req_que *req = ha->req_q_map[0];
1670
1671         ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1672         if (ret) {
1673                 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1674                     "status %x\n", ret));
1675                 return (ret);
1676         }
1677
1678         vha = qla24xx_create_vhost(fc_vport);
1679         if (vha == NULL) {
1680                 DEBUG15(printk("qla24xx_create_vhost failed, vha = %p\n",
1681                     vha));
1682                 return FC_VPORT_FAILED;
1683         }
1684         if (disable) {
1685                 atomic_set(&vha->vp_state, VP_OFFLINE);
1686                 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1687         } else
1688                 atomic_set(&vha->vp_state, VP_FAILED);
1689
1690         /* ready to create vport */
1691         qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1692                                                         vha->vp_idx);
1693
1694         /* initialized vport states */
1695         atomic_set(&vha->loop_state, LOOP_DOWN);
1696         vha->vp_err_state = VP_ERR_PORTDWN;
1697         vha->vp_prev_err_state = VP_ERR_UNKWN;
1698         /* Check if physical ha port is Up */
1699         if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1700             atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1701                 /* Don't retry or attempt login of this virtual port */
1702                 DEBUG15(printk("scsi(%ld): pport loop_state is not UP.\n",
1703                     base_vha->host_no));
1704                 atomic_set(&vha->loop_state, LOOP_DEAD);
1705                 if (!disable)
1706                         fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1707         }
1708
1709         if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1710                                    &ha->pdev->dev)) {
1711                 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1712                         vha->host_no, vha->vp_idx));
1713                 goto vport_create_failed_2;
1714         }
1715
1716         /* initialize attributes */
1717         fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1718         fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1719         fc_host_supported_classes(vha->host) =
1720                 fc_host_supported_classes(base_vha->host);
1721         fc_host_supported_speeds(vha->host) =
1722                 fc_host_supported_speeds(base_vha->host);
1723
1724         qla24xx_vport_disable(fc_vport, disable);
1725
1726         if (ha->flags.cpu_affinity_enabled) {
1727                 req = ha->req_q_map[1];
1728                 goto vport_queue;
1729         } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1730                 goto vport_queue;
1731         /* Create a request queue in QoS mode for the vport */
1732         for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1733                 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1734                         && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1735                                         8) == 0) {
1736                         qos = ha->npiv_info[cnt].q_qos;
1737                         break;
1738                 }
1739         }
1740         if (qos) {
1741                 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1742                         qos);
1743                 if (!ret)
1744                         qla_printk(KERN_WARNING, ha,
1745                         "Can't create request queue for vp_idx:%d\n",
1746                         vha->vp_idx);
1747                 else {
1748                         DEBUG2(qla_printk(KERN_INFO, ha,
1749                         "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1750                         ret, qos, vha->vp_idx));
1751                         req = ha->req_q_map[ret];
1752                 }
1753         }
1754
1755 vport_queue:
1756         vha->req = req;
1757         return 0;
1758
1759 vport_create_failed_2:
1760         qla24xx_disable_vp(vha);
1761         qla24xx_deallocate_vp_id(vha);
1762         scsi_host_put(vha->host);
1763         return FC_VPORT_FAILED;
1764 }
1765
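/*
 * NPIV vport teardown: wait for any loop resync or fcport update to finish,
 * disable the VP, unregister the FC and SCSI hosts, free the vport's fcport
 * list, release its vp index, stop its timer and, when CPU affinity is not
 * managing the queues, delete the request queue that was created for it.
 */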
1766 static int
1767 qla24xx_vport_delete(struct fc_vport *fc_vport)
1768 {
1769         scsi_qla_host_t *vha = fc_vport->dd_data;
1770         fc_port_t *fcport, *tfcport;
1771         struct qla_hw_data *ha = vha->hw;
1772         uint16_t id = vha->vp_idx;
1773
1774         while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1775             test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1776                 msleep(1000);
1777
1778         qla24xx_disable_vp(vha);
1779
1780         fc_remove_host(vha->host);
1781
1782         scsi_remove_host(vha->host);
1783
1784         list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1785                 list_del(&fcport->list);
1786                 kfree(fcport);
1787                 fcport = NULL;
1788         }
1789
1790         qla24xx_deallocate_vp_id(vha);
1791
1792         mutex_lock(&ha->vport_lock);
1793         ha->cur_vport_count--;
1794         clear_bit(vha->vp_idx, ha->vp_idx_map);
1795         mutex_unlock(&ha->vport_lock);
1796
1797         if (vha->timer_active) {
1798                 qla2x00_vp_stop_timer(vha);
1799                 DEBUG15(printk("scsi(%ld): timer for the vport[%d] = %p "
1800                     "has stopped\n",
1801                     vha->host_no, vha->vp_idx, vha));
1802         }
1803
1804         if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1805                 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1806                         qla_printk(KERN_WARNING, ha,
1807                                 "Queue delete failed.\n");
1808         }
1809
1810         scsi_host_put(vha->host);
1811         qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1812         return 0;
1813 }
1814
1815 static int
1816 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1817 {
1818         scsi_qla_host_t *vha = fc_vport->dd_data;
1819
1820         if (disable)
1821                 qla24xx_disable_vp(vha);
1822         else
1823                 qla24xx_enable_vp(vha);
1824
1825         return 0;
1826 }
1827
1828 /* BSG support for ELS/CT pass through */
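/*
 * Allocate an srb from the driver mempool together with a zeroed context
 * block of 'size' bytes for a BSG request.  Returns NULL (after handing the
 * srb back to the mempool) if the context allocation fails; on success the
 * caller owns both and frees them when the command completes.
 */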
1829 inline srb_t *
1830 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1831 {
1832         srb_t *sp;
1833         struct qla_hw_data *ha = vha->hw;
1834         struct srb_bsg_ctx *ctx;
1835
1836         sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1837         if (!sp)
1838                 goto done;
1839         ctx = kzalloc(size, GFP_KERNEL);
1840         if (!ctx) {
1841                 mempool_free(sp, ha->srb_mempool);
1842                 goto done;
1843         }
1844
1845         memset(sp, 0, sizeof(*sp));
1846         sp->fcport = fcport;
1847         sp->ctx = ctx;
1848 done:
1849         return sp;
1850 }
1851
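/*
 * ELS passthrough.  Only single-entry scatterlists are accepted.  For an
 * rport-directed ELS the remote port is (re)logged in first; for a host ELS
 * (NOLOGIN) a throw-away fcport is built from the destination port id in
 * the request.  Both payloads are DMA mapped and the request is queued to
 * the firmware through an SRB carrying an SRB_ELS_CMD_* context.
 */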
1852 static int
1853 qla2x00_process_els(struct fc_bsg_job *bsg_job)
1854 {
1855         struct fc_rport *rport;
1856         fc_port_t *fcport;
1857         struct Scsi_Host *host;
1858         scsi_qla_host_t *vha;
1859         struct qla_hw_data *ha;
1860         srb_t *sp;
1861         const char *type;
1862         int req_sg_cnt, rsp_sg_cnt;
1863         int rval =  (DRIVER_ERROR << 16);
1864         int rval = (DRIVER_ERROR << 16);
1865         struct srb_bsg *els;
1866
1867         /*  Multiple SG's are not supported for ELS requests */
1868         if (bsg_job->request_payload.sg_cnt > 1 ||
1869                 bsg_job->reply_payload.sg_cnt > 1) {
1870                 DEBUG2(printk(KERN_INFO
1871                     "multiple SG's are not supported for ELS requests"
1872                     " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1873                     bsg_job->request_payload.sg_cnt,
1874                     bsg_job->reply_payload.sg_cnt));
1875                 rval = -EPERM;
1876                 goto done;
1877         }
1878
1879         /* ELS request for rport */
1880         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1881                 rport = bsg_job->rport;
1882                 fcport = *(fc_port_t **) rport->dd_data;
1883                 host = rport_to_shost(rport);
1884                 vha = shost_priv(host);
1885                 ha = vha->hw;
1886                 type = "FC_BSG_RPT_ELS";
1887
1888                 /* make sure the rport is logged in,
1889                  * if not perform fabric login
1890                  */
1891                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1892                         DEBUG2(qla_printk(KERN_WARNING, ha,
1893                             "failed to login port %06X for ELS passthru\n",
1894                             fcport->d_id.b24));
1895                         rval = -EIO;
1896                         goto done;
1897                 }
1898         } else {
1899                 host = bsg_job->shost;
1900                 vha = shost_priv(host);
1901                 ha = vha->hw;
1902                 type = "FC_BSG_HST_ELS_NOLOGIN";
1903
1904                 /* Allocate a dummy fcport structure, since functions
1905                  * preparing the IOCB and mailbox command retrieves port
1906                  * specific information from fcport structure. For Host based
1907                  * ELS commands there will be no fcport structure allocated
1908                  */
1909                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1910                 if (!fcport) {
1911                         rval = -ENOMEM;
1912                         goto done;
1913                 }
1914
1915                 /* Initialize all required  fields of fcport */
1916                 fcport->vha = vha;
1917                 fcport->vp_idx = vha->vp_idx;
1918                 fcport->d_id.b.al_pa =
1919                     bsg_job->request->rqst_data.h_els.port_id[0];
1920                 fcport->d_id.b.area =
1921                     bsg_job->request->rqst_data.h_els.port_id[1];
1922                 fcport->d_id.b.domain =
1923                     bsg_job->request->rqst_data.h_els.port_id[2];
1924                 fcport->loop_id =
1925                     (fcport->d_id.b.al_pa == 0xFD) ?
1926                     NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1927         }
1928
1929         if (!vha->flags.online) {
1930                 DEBUG2(qla_printk(KERN_WARNING, ha,
1931                     "host not online\n"));
1932                 rval = -EIO;
1933                 goto done;
1934         }
1935
1936         req_sg_cnt =
1937             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1938             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1939         if (!req_sg_cnt) {
1940                 rval = -ENOMEM;
1941                 goto done_free_fcport;
1942         }
1943         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1944             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1945         if (!rsp_sg_cnt) {
1946                 rval = -ENOMEM;
1947                 goto done_free_fcport;
1948         }
1949
1950         if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1951             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1952         {
1953                 DEBUG2(printk(KERN_INFO
1954                     "dma mapping resulted in different sg counts "
1955                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
1956                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1957                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
1958                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1959                 rval = -EAGAIN;
1960                 goto done_unmap_sg;
1961         }
1962
1963         /* Alloc SRB structure */
1964         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1965         if (!sp) {
1966                 rval = -ENOMEM;
1967                 goto done_unmap_sg;
1968         }
1969
1970         els = sp->ctx;
1971         els->ctx.type =
1972             (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1973             SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1974         els->bsg_job = bsg_job;
1975
1976         DEBUG2(qla_printk(KERN_INFO, ha,
1977             "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1978             "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1979             bsg_job->request->rqst_data.h_els.command_code,
1980             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1981             fcport->d_id.b.al_pa));
1982
1983         rval = qla2x00_start_sp(sp);
1984         if (rval != QLA_SUCCESS) {
1985                 kfree(sp->ctx);
1986                 mempool_free(sp, ha->srb_mempool);
1987                 rval = -EIO;
1988                 goto done_unmap_sg;
1989         }
1990         return rval;
1991
1992 done_unmap_sg:
1993         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1994                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1995         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1996                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1997         goto done_free_fcport;
1998
1999 done_free_fcport:
2000         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
2001                 kfree(fcport);
2002 done:
2003         return rval;
2004 }
2005
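/*
 * CT passthrough, available only on FWI2-capable (ISP24xx and later) parts.
 * The destination loop id comes from the CT preamble: 0xFC selects the
 * fabric name server (NPH_SNS) and 0xFA the management server.  As with
 * host ELS requests, a temporary fcport carries the addressing information.
 */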
2006 static int
2007 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2008 {
2009         srb_t *sp;
2010         struct Scsi_Host *host = bsg_job->shost;
2011         scsi_qla_host_t *vha = shost_priv(host);
2012         struct qla_hw_data *ha = vha->hw;
2013         int rval = (DRIVER_ERROR << 16);
2014         int req_sg_cnt, rsp_sg_cnt;
2015         uint16_t loop_id;
2016         struct fc_port *fcport;
2017         char  *type = "FC_BSG_HST_CT";
2018         struct srb_bsg *ct;
2019
2020         /* pass through is supported only for ISP 4Gb or higher */
2021         if (!IS_FWI2_CAPABLE(ha)) {
2022                 DEBUG2(qla_printk(KERN_INFO, ha,
2023                     "scsi(%ld):Firmware is not capable to support FC "
2024                     "CT pass thru\n", vha->host_no));
2025                 rval = -EPERM;
2026                 goto done;
2027         }
2028
2029         req_sg_cnt =
2030             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2031             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2032         if (!req_sg_cnt) {
2033                 rval = -ENOMEM;
2034                 goto done;
2035         }
2036
2037         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2038             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2039         if (!rsp_sg_cnt) {
2040                 rval = -ENOMEM;
2041                 goto done;
2042         }
2043
2044         if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2045             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2046         {
2047                 DEBUG2(qla_printk(KERN_WARNING, ha,
2048                     "dma mapping resulted in different sg counts "
2049                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2050                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2051                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
2052                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2053                 rval = -EAGAIN;
2054                 goto done_unmap_sg;
2055         }
2056
2057         if (!vha->flags.online) {
2058                 DEBUG2(qla_printk(KERN_WARNING, ha,
2059                     "host not online\n"));
2060                 rval = -EIO;
2061                 goto done_unmap_sg;
2062         }
2063
2064         loop_id =
2065             (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2066             >> 24;
2067         switch (loop_id) {
2068         case 0xFC:
2069                 loop_id = cpu_to_le16(NPH_SNS);
2070                 break;
2071         case 0xFA:
2072                 loop_id = vha->mgmt_svr_loop_id;
2073                 break;
2074         default:
2075                 DEBUG2(qla_printk(KERN_INFO, ha,
2076                     "Unknown loop id: %x\n", loop_id));
2077                 rval = -EINVAL;
2078                 goto done_unmap_sg;
2079         }
2080
2081         /* Allocate a dummy fcport structure, since functions preparing the
2082          * IOCB and mailbox command retrieves port specific information
2083          * from fcport structure. For Host based ELS commands there will be
2084          * no fcport structure allocated
2085          */
2086         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2087         if (!fcport)
2088         if (!fcport) {
2089                 rval = -ENOMEM;
2090                 goto done_unmap_sg;
2091         }
2092
2093         /* Initialize all required  fields of fcport */
2094         fcport->vha = vha;
2095         fcport->vp_idx = vha->vp_idx;
2096         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2097         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2098         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2099         fcport->loop_id = loop_id;
2100
2101         /* Alloc SRB structure */
2102         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2103         if (!sp) {
2104                 rval = -ENOMEM;
2105                 goto done_free_fcport;
2106         }
2107
2108         ct = sp->ctx;
2109         ct->ctx.type = SRB_CT_CMD;
2110         ct->bsg_job = bsg_job;
2111
2112         DEBUG2(qla_printk(KERN_INFO, ha,
2113             "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2114             "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2115             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2116             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2117             fcport->d_id.b.al_pa));
2118
2119         rval = qla2x00_start_sp(sp);
2120         if (rval != QLA_SUCCESS) {
2121                 kfree(sp->ctx);
2122                 mempool_free(sp, ha->srb_mempool);
2123                 rval = -EIO;
2124                 goto done_free_fcport;
2125         }
2126         return rval;
2127
2128 done_free_fcport:
2129         kfree(fcport);
2130 done_unmap_sg:
2131         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2132             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2133         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2134             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2135 done:
2136         return rval;
2137 }
2138
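/*
 * Vendor-specific BSG commands: loopback/ECHO diagnostics and ISP84xx
 * reset/management.  The request payload is copied into a DMA-coherent
 * buffer and handed to the selected diagnostic; the mailbox status words
 * and the command code are then appended after the fc_bsg_reply in the
 * sense buffer, presumably so user space can examine the firmware
 * completion status.
 */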
2139 static int
2140 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2141 {
2142         struct Scsi_Host *host = bsg_job->shost;
2143         scsi_qla_host_t *vha = shost_priv(host);
2144         struct qla_hw_data *ha = vha->hw;
2145         int rval;
2146         uint8_t command_sent;
2147         uint32_t vendor_cmd;
2148         char *type;
2149         struct msg_echo_lb elreq;
2150         uint16_t response[MAILBOX_REGISTER_COUNT];
2151         uint8_t *fw_sts_ptr;
2152         uint8_t *req_data = NULL;
2153         dma_addr_t req_data_dma;
2154         uint32_t req_data_len;
2155         uint8_t *rsp_data = NULL;
2156         dma_addr_t rsp_data_dma;
2157         uint32_t rsp_data_len;
2158
2159         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2160             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2161             test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2162                 rval = -EBUSY;
2163                 goto done;
2164         }
2165
2166         if (!vha->flags.online) {
2167                 DEBUG2(qla_printk(KERN_WARNING, ha,
2168                     "host not online\n"));
2169                 rval = -EIO;
2170                 goto done;
2171         }
2172
2173         elreq.req_sg_cnt =
2174             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2175             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2176         if (!elreq.req_sg_cnt) {
2177                 rval = -ENOMEM;
2178                 goto done;
2179         }
2180         elreq.rsp_sg_cnt =
2181             dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2182             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2183         if (!elreq.rsp_sg_cnt) {
2184                 rval = -ENOMEM;
2185                 goto done;
2186         }
2187
2188         if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2189             (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2190         {
2191                 DEBUG2(printk(KERN_INFO
2192                     "dma mapping resulted in different sg counts "
2193                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2194                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2195                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2196                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2197                 rval = -EAGAIN;
2198                 goto done_unmap_sg;
2199         }
2200         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2201         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2202             &req_data_dma, GFP_KERNEL);
2203
2204         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2205             &rsp_data_dma, GFP_KERNEL);
2206
2207         /* Copy the request buffer in req_data now */
2208         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2209             bsg_job->request_payload.sg_cnt, req_data,
2210             req_data_len);
2211
2212         elreq.send_dma = req_data_dma;
2213         elreq.rcv_dma = rsp_data_dma;
2214         elreq.transfer_size = req_data_len;
2215
2216         /* Vendor cmd : loopback or ECHO diagnostic
2217          * Options:
2218          *      Loopback : Either internal or external loopback
2219          *      ECHO: ECHO ELS or Vendor specific FC4  link data
2220          */
2221         vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2222         elreq.options =
2223             *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2224             + 1);
2225
2226         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2227         case QL_VND_LOOPBACK:
2228                 if (ha->current_topology != ISP_CFG_F) {
2229                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
2230
2231                         DEBUG2(qla_printk(KERN_INFO, ha,
2232                                 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2233                                 vha->host_no, type, vendor_cmd, elreq.options));
2234
2235                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
2236                         rval = qla2x00_loopback_test(vha, &elreq, response);
2237                         if (IS_QLA81XX(ha)) {
2238                                 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2239                                         DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2240                                                 "ISP\n", __func__, vha->host_no));
2241                                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2242                                         qla2xxx_wake_dpc(vha);
2243                                  }
2244                         }
2245                 } else {
2246                         type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2247                         DEBUG2(qla_printk(KERN_INFO, ha,
2248                                 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2249                                 vha->host_no, type, vendor_cmd, elreq.options));
2250
2251                         command_sent = INT_DEF_LB_ECHO_CMD;
2252                         rval = qla2x00_echo_test(vha, &elreq, response);
2253                 }
2254                 break;
2255         case QLA84_RESET:
2256                 if (!IS_QLA84XX(vha->hw)) {
2257                         rval = -EINVAL;
2258                         DEBUG16(printk(
2259                                 "%s(%ld): 8xxx exiting.\n",
2260                                 __func__, vha->host_no));
2261                         return rval;
2262                 }
2263                 rval = qla84xx_reset(vha, &elreq, bsg_job);
2264                 break;
2265         case QLA84_MGMT_CMD:
2266                 if (!IS_QLA84XX(vha->hw)) {
2267                         rval = -EINVAL;
2268                         DEBUG16(printk(
2269                                 "%s(%ld): 8xxx exiting.\n",
2270                                 __func__, vha->host_no));
2271                         return rval;
2272                 }
2273                 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2274                 break;
2275         default:
2276                 rval = -ENOSYS;
2277         }
2278
2279         if (rval != QLA_SUCCESS) {
2280                 DEBUG2(qla_printk(KERN_WARNING, ha,
2281                         "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2282                 rval = 0;
2283                 bsg_job->reply->result = (DID_ERROR << 16);
2284                 bsg_job->reply->reply_payload_rcv_len = 0;
2285                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2286                 memcpy(fw_sts_ptr, response, sizeof(response));
2287                 fw_sts_ptr += sizeof(response);
2288                 *fw_sts_ptr = command_sent;
2289         } else {
2290                 DEBUG2(qla_printk(KERN_WARNING, ha,
2291                         "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2292                 rval = bsg_job->reply->result = 0;
2293                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2294                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2295                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2296                 memcpy(fw_sts_ptr, response, sizeof(response));
2297                 fw_sts_ptr += sizeof(response);
2298                 *fw_sts_ptr = command_sent;
2299                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2300                     bsg_job->reply_payload.sg_cnt, rsp_data,
2301                     rsp_data_len);
2302         }
2303         bsg_job->job_done(bsg_job);
2304
2305 done_unmap_sg:
2306         if (req_data)
2307                 dma_free_coherent(&ha->pdev->dev, req_data_len,
2308                         req_data, req_data_dma);
2309         if (rsp_data)
2310                 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
2311                         rsp_data, rsp_data_dma);
2312         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2313             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2314         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2315             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2316
2317 done:
2318         return rval;
2319 }
2320
2321 static int
2322 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2323 {
2324         int ret = -EINVAL;
2325
2326         switch (bsg_job->request->msgcode) {
2327         case FC_BSG_RPT_ELS:
2328         case FC_BSG_HST_ELS_NOLOGIN:
2329                 ret = qla2x00_process_els(bsg_job);
2330                 break;
2331         case FC_BSG_HST_CT:
2332                 ret = qla2x00_process_ct(bsg_job);
2333                 break;
2334         case FC_BSG_HST_VENDOR:
2335                 ret = qla2x00_process_vendor_specific(bsg_job);
2336                 break;
2337         case FC_BSG_HST_ADD_RPORT:
2338         case FC_BSG_HST_DEL_RPORT:
2339         case FC_BSG_RPT_CT:
2340         default:
2341                 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2342                 break;
2343         }
2344         return ret;
2345 }
2346
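/*
 * BSG timeout handler: scan every request queue's outstanding commands
 * under the hardware lock for the SRB that owns this bsg_job, abort it via
 * the firmware abort-command mailbox, and release the SRB (plus the
 * temporary fcport for CT requests).
 */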
2347 static int
2348 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2349 {
2350         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2351         struct qla_hw_data *ha = vha->hw;
2352         srb_t *sp;
2353         int cnt, que;
2354         unsigned long flags;
2355         struct req_que *req;
2356         struct srb_bsg *sp_bsg;
2357
2358         /* find the bsg job from the active list of commands */
2359         spin_lock_irqsave(&ha->hardware_lock, flags);
2360         for (que = 0; que < ha->max_req_queues; que++) {
2361                 req = ha->req_q_map[que];
2362                 if (!req)
2363                         continue;
2364
2365                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
2366                         sp = req->outstanding_cmds[cnt];
2367
2368                         if (sp) {
2369                                 sp_bsg = (struct srb_bsg *)sp->ctx;
2370
2371                                 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2372                                     (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
2373                                     (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2374                                     (sp_bsg->bsg_job == bsg_job)) {
2375                                         if (ha->isp_ops->abort_command(sp)) {
2376                                                 DEBUG2(qla_printk(KERN_INFO, ha,
2377                                                 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2378                                                 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2379                                         } else {
2380                                                 DEBUG2(qla_printk(KERN_INFO, ha,
2381                                                 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2382                                                 bsg_job->req->errors = bsg_job->reply->result = 0;
2383                                         }
2384                                         goto done;
2385                                 }
2386                         }
2387                 }
2388         }
2389         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2390         DEBUG2(qla_printk(KERN_INFO, ha,
2391                 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2392         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2393         return 0;
2394
2395 done:
2396         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2397         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2398                 kfree(sp->fcport);
2399         kfree(sp->ctx);
2400         mempool_free(sp, ha->srb_mempool);
2401         return 0;
2402 }
2403
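/*
 * FC transport template for physical ports.  The vport template below is
 * identical except that it omits the NPIV vport create/disable/delete
 * callbacks and the supported-speeds host attribute, which only apply to
 * the physical port.
 */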
2404 struct fc_function_template qla2xxx_transport_functions = {
2405
2406         .show_host_node_name = 1,
2407         .show_host_port_name = 1,
2408         .show_host_supported_classes = 1,
2409         .show_host_supported_speeds = 1,
2410
2411         .get_host_port_id = qla2x00_get_host_port_id,
2412         .show_host_port_id = 1,
2413         .get_host_speed = qla2x00_get_host_speed,
2414         .show_host_speed = 1,
2415         .get_host_port_type = qla2x00_get_host_port_type,
2416         .show_host_port_type = 1,
2417         .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2418         .show_host_symbolic_name = 1,
2419         .set_host_system_hostname = qla2x00_set_host_system_hostname,
2420         .show_host_system_hostname = 1,
2421         .get_host_fabric_name = qla2x00_get_host_fabric_name,
2422         .show_host_fabric_name = 1,
2423         .get_host_port_state = qla2x00_get_host_port_state,
2424         .show_host_port_state = 1,
2425
2426         .dd_fcrport_size = sizeof(struct fc_port *),
2427         .show_rport_supported_classes = 1,
2428
2429         .get_starget_node_name = qla2x00_get_starget_node_name,
2430         .show_starget_node_name = 1,
2431         .get_starget_port_name = qla2x00_get_starget_port_name,
2432         .show_starget_port_name = 1,
2433         .get_starget_port_id  = qla2x00_get_starget_port_id,
2434         .show_starget_port_id = 1,
2435
2436         .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2437         .show_rport_dev_loss_tmo = 1,
2438
2439         .issue_fc_host_lip = qla2x00_issue_lip,
2440         .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2441         .terminate_rport_io = qla2x00_terminate_rport_io,
2442         .get_fc_host_stats = qla2x00_get_fc_host_stats,
2443
2444         .vport_create = qla24xx_vport_create,
2445         .vport_disable = qla24xx_vport_disable,
2446         .vport_delete = qla24xx_vport_delete,
2447         .bsg_request = qla24xx_bsg_request,
2448         .bsg_timeout = qla24xx_bsg_timeout,
2449 };
2450
2451 struct fc_function_template qla2xxx_transport_vport_functions = {
2452
2453         .show_host_node_name = 1,
2454         .show_host_port_name = 1,
2455         .show_host_supported_classes = 1,
2456
2457         .get_host_port_id = qla2x00_get_host_port_id,
2458         .show_host_port_id = 1,
2459         .get_host_speed = qla2x00_get_host_speed,
2460         .show_host_speed = 1,
2461         .get_host_port_type = qla2x00_get_host_port_type,
2462         .show_host_port_type = 1,
2463         .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2464         .show_host_symbolic_name = 1,
2465         .set_host_system_hostname = qla2x00_set_host_system_hostname,
2466         .show_host_system_hostname = 1,
2467         .get_host_fabric_name = qla2x00_get_host_fabric_name,
2468         .show_host_fabric_name = 1,
2469         .get_host_port_state = qla2x00_get_host_port_state,
2470         .show_host_port_state = 1,
2471
2472         .dd_fcrport_size = sizeof(struct fc_port *),
2473         .show_rport_supported_classes = 1,
2474
2475         .get_starget_node_name = qla2x00_get_starget_node_name,
2476         .show_starget_node_name = 1,
2477         .get_starget_port_name = qla2x00_get_starget_port_name,
2478         .show_starget_port_name = 1,
2479         .get_starget_port_id  = qla2x00_get_starget_port_id,
2480         .show_starget_port_id = 1,
2481
2482         .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2483         .show_rport_dev_loss_tmo = 1,
2484
2485         .issue_fc_host_lip = qla2x00_issue_lip,
2486         .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2487         .terminate_rport_io = qla2x00_terminate_rport_io,
2488         .get_fc_host_stats = qla2x00_get_fc_host_stats,
2489         .bsg_request = qla24xx_bsg_request,
2490         .bsg_timeout = qla24xx_bsg_timeout,
2491 };
2492
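/*
 * Seed the fc_host attributes of a newly registered host: WWNN/WWPN,
 * supported class of service, NPIV limits and the supported link speeds
 * advertised for this ISP generation (10Gb for 81xx down to 1Gb only for
 * the oldest parts).
 */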
2493 void
2494 qla2x00_init_host_attr(scsi_qla_host_t *vha)
2495 {
2496         struct qla_hw_data *ha = vha->hw;
2497         u32 speed = FC_PORTSPEED_UNKNOWN;
2498
2499         fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2500         fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2501         fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2502         fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2503         fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2504
2505         if (IS_QLA81XX(ha))
2506                 speed = FC_PORTSPEED_10GBIT;
2507         else if (IS_QLA25XX(ha))
2508                 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2509                     FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2510         else if (IS_QLA24XX_TYPE(ha))
2511                 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2512                     FC_PORTSPEED_1GBIT;
2513         else if (IS_QLA23XX(ha))
2514                 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2515         else
2516                 speed = FC_PORTSPEED_1GBIT;
2517         fc_host_supported_speeds(vha->host) = speed;
2518 }
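
/*
 * ISP84xx vendor reset: vendor_cmd word 2 selects whether the chip is
 * restarted into its diagnostic or its operational firmware, and the
 * request is forwarded to qla84xx_reset_chip().
 */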
2519 static int
2520 qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2521 {
2522         int             ret = 0;
2523         int             cmd;
2524         uint16_t        cmd_status;
2525
2526         DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2527
2528         cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2529                         == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2530                                 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2531         ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2532             &cmd_status);
2533         return ret;
2534 }
2535
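/*
 * ISP84xx management passthrough: the vendor command words are unpacked
 * into a qla84_msg_mgmt request and translated into an ACCESS_CHIP_84XX
 * IOCB (memory read/write, config change or info query) that is issued to
 * the firmware, with an optional DMA buffer described by a single data
 * segment.
 */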
2536 static int
2537 qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2538 {
2539         struct access_chip_84xx *mn;
2540         dma_addr_t mn_dma, mgmt_dma;
2541         void *mgmt_b = NULL;
2542         int ret = 0;
2543         int rsp_hdr_len, len = 0;
2544         struct qla84_msg_mgmt *ql84_mgmt;
2545
2546         ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
2547         ql84_mgmt->cmd =
2548                 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2549         ql84_mgmt->mgmtp.u.mem.start_addr =
2550                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2551         ql84_mgmt->len =
2552                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2553         ql84_mgmt->mgmtp.u.config.id =
2554                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2555         ql84_mgmt->mgmtp.u.config.param0 =
2556                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2557         ql84_mgmt->mgmtp.u.config.param1 =
2558                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2559         ql84_mgmt->mgmtp.u.info.type =
2560                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2561         ql84_mgmt->mgmtp.u.info.context =
2562                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2563
2564         rsp_hdr_len = bsg_job->request_payload.payload_len;
2565
2566         mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2567         if (mn == NULL) {
2568                 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2569                     "failed (%lu)\n", __func__, ha->host_no));
2570                 return -ENOMEM;
2571         }
2572
2573         memset(mn, 0, sizeof (struct access_chip_84xx));
2574
2575         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2576         mn->entry_count = 1;
2577
2578         switch (ql84_mgmt->cmd) {
2579         case QLA84_MGMT_READ_MEM:
2580                 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2581                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2582                 break;
2583         case QLA84_MGMT_WRITE_MEM:
2584                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2585                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2586                 break;
2587         case QLA84_MGMT_CHNG_CONFIG:
2588                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2589                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2590                 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2591                 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2592                 break;
2593         case QLA84_MGMT_GET_INFO:
2594                 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2595                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2596                 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2597                 break;
2598         default:
2599                 ret = -EIO;
2600                 goto exit_mgmt0;
2601         }
2602
2603         if ((len == ql84_mgmt->len) &&
2604                 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2605                 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2606                                 &mgmt_dma, GFP_KERNEL);
2607                 if (mgmt_b == NULL) {
2608                         DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2609                             "failed (%lu)\n", __func__, ha->host_no));
2610                         ret = -ENOMEM;
2611                         goto exit_mgmt0;
2612                 }
2613                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2614                 mn->dseg_count = cpu_to_le16(1);
2615                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2616                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2617                 mn->dseg_length = cpu_to_le32(len);
2618
2619                 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2620                         memcpy(mgmt_b, ql84_mgmt->payload, len);
2621                 }
2622         }
2623
2624         ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2625         if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2626             || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2627                 if (ret != QLA_SUCCESS)
2628                         DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2629                                 __func__, ha->host_no));
2630         } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2631                         (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2632         }
2633
2634         if (mgmt_b)
2635                 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2636
2637 exit_mgmt0:
2638         dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
2639         return ret;
2640 }