/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2016
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
                 "number of pages for each debug feature area (default 4)");

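/* length of a payload trace record that carries only "offset" bytes of data */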
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
        return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

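/*
 * Write a variable-length payload to the "pay" debug feature area, split
 * into records of at most ZFCP_DBF_PAY_MAX_REC bytes that all carry the
 * same FSF request ID and area tag for later correlation.
 */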
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
                       u64 req_id)
{
        struct zfcp_dbf_pay *pl = &dbf->pay_buf;
        u16 offset = 0, rec_length;

        spin_lock(&dbf->pay_lock);
        memset(pl, 0, sizeof(*pl));
        pl->fsf_req_id = req_id;
        memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

        while (offset < length) {
                rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
                                 (u16) (length - offset));
                memcpy(pl->data, data + offset, rec_length);
                debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

                offset += rec_length;
                pl->counter++;
        }

        spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating the type of FSF response
 * @level: trace level of event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
        struct fsf_qtcb_header *q_head = &req->qtcb->header;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_RES;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;
        rec->fsf_seq_no = req->seq_no;
        rec->u.res.req_issued = req->issued;
        rec->u.res.prot_status = q_pref->prot_status;
        rec->u.res.fsf_status = q_head->fsf_status;
        rec->u.res.port_handle = q_head->port_handle;
        rec->u.res.lun_handle = q_head->lun_handle;

        memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
               FSF_PROT_STATUS_QUAL_SIZE);
        memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
               FSF_STATUS_QUALIFIER_SIZE);

        if (req->fsf_command != FSF_QTCB_FCP_CMND) {
                rec->pl_len = q_head->log_length;
                zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
                                  rec->pl_len, "fsf_res", req->req_id);
        }

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_status_read_buffer *srb = req->data;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_USS;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;

        if (!srb)
                goto log;

        rec->u.uss.status_type = srb->status_type;
        rec->u.uss.status_subtype = srb->status_subtype;
        rec->u.uss.d_id = ntoh24(srb->d_id);
        rec->u.uss.lun = srb->fcp_lun;
        memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
               sizeof(rec->u.uss.queue_designator));

        /* status read buffer payload length */
        rec->pl_len = (!srb->length) ? 0 : srb->length -
                        offsetof(struct fsf_status_read_buffer, payload);

        if (rec->pl_len)
                zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
                                  "fsf_uss", req->req_id);
log:
        debug_event(dbf->hba, 2, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        struct fsf_status_read_buffer *sr_buf = req->data;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BIT;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = req->fsf_command;
        memcpy(&rec->u.be, &sr_buf->payload.bit_error,
               sizeof(struct fsf_bit_error_payload));

        debug_event(dbf->hba, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
                          void **pl)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        unsigned long flags;
        u16 length;

        if (!pl)
                return;

        spin_lock_irqsave(&dbf->pay_lock, flags);
        memset(payload, 0, sizeof(*payload));

        memcpy(payload->area, "def_err", 7);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        length = min((u16)sizeof(struct qdio_buffer),
                     (u16)ZFCP_DBF_PAY_MAX_REC);

        while (payload->counter < scount && (char *)pl[payload->counter]) {
                memcpy(payload->data, (char *)pl[payload->counter], length);
                debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
                payload->counter++;
        }

        spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BASIC;

        debug_event(dbf->hba, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

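/*
 * Fill the status fields common to all recovery trace records from the
 * adapter and, when given, the port and SCSI device involved.
 */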
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
                                struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
                                struct scsi_device *sdev)
{
        rec->adapter_status = atomic_read(&adapter->status);
        if (port) {
                rec->port_status = atomic_read(&port->status);
                rec->wwpn = port->wwpn;
                rec->d_id = port->d_id;
        }
        if (sdev) {
                rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
                rec->lun = zfcp_scsi_dev_lun(sdev);
        } else
                rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
                       struct zfcp_port *port, struct scsi_device *sdev,
                       u8 want, u8 need)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        struct list_head *entry;
        unsigned long flags;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_TRIG;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, adapter, port, sdev);

        list_for_each(entry, &adapter->erp_ready_head)
                rec->u.trig.ready++;

        list_for_each(entry, &adapter->erp_running_head)
                rec->u.trig.running++;

        rec->u.trig.want = want;
        rec->u.trig.need = need;

        debug_event(dbf->rec, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
        struct zfcp_dbf *dbf = erp->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

        rec->u.run.fsf_req_id = erp->fsf_req_id;
        rec->u.run.rec_status = erp->status;
        rec->u.run.rec_step = erp->step;
        rec->u.run.rec_action = erp->action;

        if (erp->sdev)
                rec->u.run.rec_count =
                        atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
        else if (erp->port)
                rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
        else
                rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

        debug_event(dbf->rec, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
                          u64 req_id)
{
        struct zfcp_dbf *dbf = wka_port->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->port_status = wka_port->status;
        rec->d_id = wka_port->d_id;
        rec->lun = ZFCP_DBF_INVALID_LUN;

        rec->u.run.fsf_req_id = req_id;
        rec->u.run.rec_status = ~0;
        rec->u.run.rec_step = ~0;
        rec->u.run.rec_action = ~0;
        rec->u.run.rec_count = ~0;

        debug_event(dbf->rec, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

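/*
 * Write one SAN trace record; if the payload exceeds what fits into
 * rec->payload, additionally dump up to cap_len bytes from the
 * scatter-gather list into the "pay" debug feature area.
 */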
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
                  char *paytag, struct scatterlist *sg, u8 id, u16 len,
                  u64 req_id, u32 d_id, u16 cap_len)
{
        struct zfcp_dbf_san *rec = &dbf->san_buf;
        u16 rec_len;
        unsigned long flags;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        u16 pay_sum = 0;

        spin_lock_irqsave(&dbf->san_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = id;
        rec->fsf_req_id = req_id;
        rec->d_id = d_id;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->pl_len = len; /* full length even if we cap pay below */
        if (!sg)
                goto out;
        rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
        memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
        if (len <= rec_len)
                goto out; /* skip pay record if full content in rec->payload */

        /* if (len > rec_len):
         * dump data up to cap_len ignoring small duplicate in rec->payload
         */
        spin_lock(&dbf->pay_lock);
        memset(payload, 0, sizeof(*payload));
        memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
                u16 pay_len, offset = 0;

                while (offset < sg->length && pay_sum < cap_len) {
                        pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
                                      (u16)(sg->length - offset));
                        /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
                        memcpy(payload->data, sg_virt(sg) + offset, pay_len);
                        debug_event(dbf->pay, 1, payload,
                                    zfcp_dbf_plen(pay_len));
                        payload->counter++;
                        offset += pay_len;
                        pay_sum += pay_len;
                }
        }
        spin_unlock(&dbf->pay_lock);

out:
        debug_event(dbf->san, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        length = (u16)zfcp_qdio_real_bytes(ct_els->req);
        zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
                     length, fsf->req_id, d_id, length);
}

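/*
 * For a GPN_FT nameserver response, cap the traced payload length after
 * the last port entry so that unused response buffer space is not dumped;
 * for any other response return the length unchanged.
 */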
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
                                              struct zfcp_fsf_req *fsf,
                                              u16 len)
{
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
        struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
        struct scatterlist *resp_entry = ct_els->resp;
        struct fc_gpn_ft_resp *acc;
        int max_entries, x, last = 0;

        if (!(memcmp(tag, "fsscth2", 7) == 0
              && ct_els->d_id == FC_FID_DIR_SERV
              && reqh->ct_rev == FC_CT_REV
              && reqh->ct_in_id[0] == 0
              && reqh->ct_in_id[1] == 0
              && reqh->ct_in_id[2] == 0
              && reqh->ct_fs_type == FC_FST_DIR
              && reqh->ct_fs_subtype == FC_NS_SUBTYPE
              && reqh->ct_options == 0
              && reqh->_ct_resvd1 == 0
              && reqh->ct_cmd == FC_NS_GPN_FT
              /* reqh->ct_mr_size can vary so do not match but read below */
              && reqh->_ct_resvd2 == 0
              && reqh->ct_reason == 0
              && reqh->ct_explan == 0
              && reqh->ct_vendor == 0
              && reqn->fn_resvd == 0
              && reqn->fn_domain_id_scope == 0
              && reqn->fn_area_id_scope == 0
              && reqn->fn_fc4_type == FC_TYPE_FCP))
                return len; /* not GPN_FT response so do not cap */

        acc = sg_virt(resp_entry);
        max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
                + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
                     * to account for header as 1st pseudo "entry" */;

        /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
         * response, allowing us to skip special handling for it - just skip it
         */
        for (x = 1; x < max_entries && !last; x++) {
                if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
                        acc++;
                else
                        acc = sg_virt(++resp_entry);

                last = acc->fp_flags & FC_NS_FID_LAST;
        }
        len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
        return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
        zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
                     length, fsf->req_id, ct_els->d_id,
                     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing the unsolicited status buffer with the incoming ELS
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct fsf_status_read_buffer *srb =
                (struct fsf_status_read_buffer *) fsf->data;
        u16 length;
        struct scatterlist sg;

        length = (u16)(srb->length -
                        offsetof(struct fsf_status_read_buffer, payload));
        sg_init_one(&sg, srb->payload.data, length);
        zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
                     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level of event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
                   struct zfcp_fsf_req *fsf)
{
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) sc->device->host->hostdata[0];
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
        struct fcp_resp_with_ext *fcp_rsp;
        struct fcp_resp_rsp_info *fcp_rsp_info;
        unsigned long flags;

        spin_lock_irqsave(&dbf->scsi_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_SCSI_CMND;
        rec->scsi_result = sc->result;
        rec->scsi_retries = sc->retries;
        rec->scsi_allowed = sc->allowed;
        rec->scsi_id = sc->device->id;
        rec->scsi_lun = sc->device->lun;
        rec->host_scribble = (unsigned long)sc->host_scribble;

        memcpy(rec->scsi_opcode, sc->cmnd,
               min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

        if (fsf) {
                rec->fsf_req_id = fsf->req_id;
                fcp_rsp = (struct fcp_resp_with_ext *)
                                &(fsf->qtcb->bottom.io.fcp_rsp);
                memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
                if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
                        fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
                        rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
                }
                if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
                        rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
                                          (u16)ZFCP_DBF_PAY_MAX_REC);
                        zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
                                          "fcp_sns", fsf->req_id);
                }
        }

        debug_event(dbf->scsi, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

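/* register one debug feature area with a hex/ASCII view and trace level 3 */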
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
        struct debug_info *d;

        d = debug_register(name, size, 1, rec_size);
        if (!d)
                return NULL;

        debug_register_view(d, &debug_hex_ascii_view);
        debug_set_level(d, 3);

        return d;
}

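/* unregister all debug feature areas of an adapter and free the dbf structure */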
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
        if (!dbf)
                return;

        debug_unregister(dbf->scsi);
        debug_unregister(dbf->san);
        debug_unregister(dbf->hba);
        debug_unregister(dbf->pay);
        debug_unregister(dbf->rec);
        kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 *
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
        char name[DEBUG_MAX_NAME_LEN];
        struct zfcp_dbf *dbf;

        dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
        if (!dbf)
                return -ENOMEM;

        spin_lock_init(&dbf->pay_lock);
        spin_lock_init(&dbf->hba_lock);
        spin_lock_init(&dbf->san_lock);
        spin_lock_init(&dbf->scsi_lock);
        spin_lock_init(&dbf->rec_lock);

        /* debug feature area which records recovery activity */
        sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
        dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
        if (!dbf->rec)
                goto err_out;

        /* debug feature area which records HBA (FSF and QDIO) conditions */
        sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
        dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
        if (!dbf->hba)
                goto err_out;

        /* debug feature area which records payload info */
        sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
        dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
        if (!dbf->pay)
                goto err_out;

        /* debug feature area which records SAN command failures and recovery */
        sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
        dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
        if (!dbf->san)
                goto err_out;

        /* debug feature area which records SCSI command failures and recovery */
        sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
        dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
        if (!dbf->scsi)
                goto err_out;

        adapter->dbf = dbf;

        return 0;
err_out:
        zfcp_dbf_unregister(dbf);
        return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;

        adapter->dbf = NULL;
        zfcp_dbf_unregister(dbf);
}
663