drivers/scsi/qla4xxx/ql4_bsg.c
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"

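/**
 * qla4xxx_read_flash - handle QLISCSI_VND_READ_FLASH vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads the requested flash region into a DMA-coherent buffer and copies
 * the data into the reply payload scatter-gather list.
 **/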
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        uint32_t offset = 0;
        uint32_t length = 0;
        dma_addr_t flash_dma;
        uint8_t *flash = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        if (ha->flash_state != QLFLASH_WAITING) {
                ql4_printk(KERN_ERR, ha, "%s: another flash operation "
                           "active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        ha->flash_state = QLFLASH_READING;
        offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        length = bsg_job->reply_payload.payload_len;

        flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
                                   GFP_KERNEL);
        if (!flash) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else {
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                                            bsg_job->reply_payload.sg_cnt,
                                            flash, length);
                bsg_reply->result = DID_OK << 16;
        }

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
        ha->flash_state = QLFLASH_WAITING;
        return rval;
}

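/**
 * qla4xxx_update_flash - handle QLISCSI_VND_UPDATE_FLASH vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Copies the request payload into a DMA-coherent buffer and programs it
 * into the flash region described by the vendor command.
 **/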
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        uint32_t length = 0;
        uint32_t offset = 0;
        uint32_t options = 0;
        dma_addr_t flash_dma;
        uint8_t *flash = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        if (ha->flash_state != QLFLASH_WAITING) {
                ql4_printk(KERN_ERR, ha, "%s: another flash operation "
                           "active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        ha->flash_state = QLFLASH_WRITING;
        length = bsg_job->request_payload.payload_len;
        offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

        flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
                                   GFP_KERNEL);
        if (!flash) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                          bsg_job->request_payload.sg_cnt, flash, length);

        rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else
                bsg_reply->result = DID_OK << 16;

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
        ha->flash_state = QLFLASH_WAITING;
        return rval;
}

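/**
 * qla4xxx_get_acb_state - handle QLISCSI_VND_GET_ACB_STATE vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Queries the IP state for the ACB and IP index given in the vendor
 * command and returns the mailbox status registers in the reply payload.
 **/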
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t status[MBOX_REG_COUNT];
        uint32_t acb_idx;
        uint32_t ip_idx;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        /* Only 4022 and above adapters are supported */
        if (is_qla4010(ha))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        if (bsg_job->reply_payload.payload_len < sizeof(status)) {
                ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
                           __func__, bsg_job->reply_payload.payload_len);
                rval = -EINVAL;
                goto leave;
        }

        acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

        rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
                           __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else {
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                                            bsg_job->reply_payload.sg_cnt,
                                            status, sizeof(status));
                bsg_reply->result = DID_OK << 16;
        }

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
leave:
        return rval;
}

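/**
 * qla4xxx_read_nvram - handle QLISCSI_VND_READ_NVRAM vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads the requested NVRAM range into a DMA-coherent buffer and copies
 * it into the reply payload. Only 40xx adapters are supported.
 **/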
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t offset = 0;
        uint32_t len = 0;
        uint32_t total_len = 0;
        dma_addr_t nvram_dma;
        uint8_t *nvram = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        /* Only 40xx adapters are supported */
        if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        len = bsg_job->reply_payload.payload_len;
        total_len = offset + len;

        /* total len should not be greater than max NVRAM size */
        if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
            ((is_qla4022(ha) || is_qla4032(ha)) &&
             total_len > QL40X2_NVRAM_SIZE)) {
                ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
                           " nvram size, offset=%d len=%d\n",
                           __func__, offset, len);
                goto leave;
        }

        nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
                                   GFP_KERNEL);
        if (!nvram) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else {
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                                            bsg_job->reply_payload.sg_cnt,
                                            nvram, len);
                bsg_reply->result = DID_OK << 16;
        }

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
        return rval;
}

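/**
 * qla4xxx_update_nvram - handle QLISCSI_VND_UPDATE_NVRAM vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Copies the request payload into a DMA-coherent buffer and writes it to
 * the requested NVRAM range. Only 40xx adapters are supported.
 **/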
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t offset = 0;
        uint32_t len = 0;
        uint32_t total_len = 0;
        dma_addr_t nvram_dma;
        uint8_t *nvram = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        len = bsg_job->request_payload.payload_len;
        total_len = offset + len;

        /* total len should not be greater than max NVRAM size */
        if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
            ((is_qla4022(ha) || is_qla4032(ha)) &&
             total_len > QL40X2_NVRAM_SIZE)) {
                ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
                           " nvram size, offset=%d len=%d\n",
                           __func__, offset, len);
                goto leave;
        }

        nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
                                   GFP_KERNEL);
        if (!nvram) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                          bsg_job->request_payload.sg_cnt, nvram, len);

        rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else
                bsg_reply->result = DID_OK << 16;

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
        return rval;
}

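/**
 * qla4xxx_restore_defaults - handle QLISCSI_VND_RESTORE_DEFAULTS request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Restores the factory default settings for the region and fields passed
 * in the vendor command.
 **/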
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t region = 0;
        uint32_t field0 = 0;
        uint32_t field1 = 0;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        if (is_qla4010(ha))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
        field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

        rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: restore defaults failed\n",
                           __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else
                bsg_reply->result = DID_OK << 16;

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
leave:
        return rval;
}

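/**
 * qla4xxx_bsg_get_acb - handle QLISCSI_VND_GET_ACB vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads the address control block of the requested type into a
 * DMA-coherent buffer and copies it into the reply payload.
 **/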
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t acb_type = 0;
        uint32_t len = 0;
        dma_addr_t acb_dma;
        uint8_t *acb = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        /* Only 4022 and above adapters are supported */
        if (is_qla4010(ha))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        len = bsg_job->reply_payload.payload_len;
        if (len < sizeof(struct addr_ctrl_blk)) {
                ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
                           __func__, len);
                rval = -EINVAL;
                goto leave;
        }

        acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
        if (!acb) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else {
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                                            bsg_job->reply_payload.sg_cnt,
                                            acb, len);
                bsg_reply->result = DID_OK << 16;
        }

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
        return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);

        switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
        case QLISCSI_VND_READ_FLASH:
                return qla4xxx_read_flash(bsg_job);

        case QLISCSI_VND_UPDATE_FLASH:
                return qla4xxx_update_flash(bsg_job);

        case QLISCSI_VND_GET_ACB_STATE:
                return qla4xxx_get_acb_state(bsg_job);

        case QLISCSI_VND_READ_NVRAM:
                return qla4xxx_read_nvram(bsg_job);

        case QLISCSI_VND_UPDATE_NVRAM:
                return qla4xxx_update_nvram(bsg_job);

        case QLISCSI_VND_RESTORE_DEFAULTS:
                return qla4xxx_restore_defaults(bsg_job);

        case QLISCSI_VND_GET_ACB:
                return qla4xxx_bsg_get_acb(bsg_job);

        default:
                ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
                           "0x%x\n", __func__,
                           bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
                bsg_reply->result = (DID_ERROR << 16);
                bsg_reply->reply_payload_rcv_len = 0;
                bsg_job_done(bsg_job, bsg_reply->result,
                             bsg_reply->reply_payload_rcv_len);
                return -ENOSYS;
        }
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);

        switch (bsg_req->msgcode) {
        case ISCSI_BSG_HST_VENDOR:
                return qla4xxx_process_vendor_specific(bsg_job);

        default:
                ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
                           __func__, bsg_req->msgcode);
        }

        return -ENOSYS;
}