IB/ipath: Clean up some comments
drivers/infiniband/hw/ipath/ipath_verbs.c
1 /*
2  * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <rdma/ib_mad.h>
35 #include <rdma/ib_user_verbs.h>
36 #include <linux/io.h>
37 #include <linux/utsname.h>
38
39 #include "ipath_kernel.h"
40 #include "ipath_verbs.h"
41 #include "ipath_common.h"
42
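/* 251 is prime; presumably chosen so QP numbers hash evenly across the table. */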
43 static unsigned int ib_ipath_qp_table_size = 251;
44 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
45 MODULE_PARM_DESC(qp_table_size, "QP table size");
46
47 unsigned int ib_ipath_lkey_table_size = 12;
48 module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
49                    S_IRUGO);
50 MODULE_PARM_DESC(lkey_table_size,
51                  "LKEY table size in bits (2^n, 1 <= n <= 23)");
52
53 static unsigned int ib_ipath_max_pds = 0xFFFF;
54 module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
55 MODULE_PARM_DESC(max_pds,
56                  "Maximum number of protection domains to support");
57
58 static unsigned int ib_ipath_max_ahs = 0xFFFF;
59 module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
60 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
61
62 unsigned int ib_ipath_max_cqes = 0x2FFFF;
63 module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
64 MODULE_PARM_DESC(max_cqes,
65                  "Maximum number of completion queue entries to support");
66
67 unsigned int ib_ipath_max_cqs = 0x1FFFF;
68 module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
69 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
70
71 unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
72 module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
73                    S_IWUSR | S_IRUGO);
74 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
75
76 unsigned int ib_ipath_max_qps = 16384;
77 module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
78 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
79
80 unsigned int ib_ipath_max_sges = 0x60;
81 module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
82 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
83
84 unsigned int ib_ipath_max_mcast_grps = 16384;
85 module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
86                    S_IWUSR | S_IRUGO);
87 MODULE_PARM_DESC(max_mcast_grps,
88                  "Maximum number of multicast groups to support");
89
90 unsigned int ib_ipath_max_mcast_qp_attached = 16;
91 module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
92                    uint, S_IWUSR | S_IRUGO);
93 MODULE_PARM_DESC(max_mcast_qp_attached,
94                  "Maximum number of attached QPs to support");
95
96 unsigned int ib_ipath_max_srqs = 1024;
97 module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
98 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
99
100 unsigned int ib_ipath_max_srq_sges = 128;
101 module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
102                    uint, S_IWUSR | S_IRUGO);
103 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
104
105 unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
106 module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
107                    uint, S_IWUSR | S_IRUGO);
108 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
109
110 static unsigned int ib_ipath_disable_sma;
111 module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
112 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
113
114 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
115         [IB_QPS_RESET] = 0,
116         [IB_QPS_INIT] = IPATH_POST_RECV_OK,
117         [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
118         [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
119             IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
120         [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
121             IPATH_POST_SEND_OK,
122         [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
123         [IB_QPS_ERR] = 0,
124 };
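/*
 * For example, ipath_post_one_send() below accepts a work request only
 * when ib_ipath_state_ops[qp->state] has IPATH_POST_SEND_OK set.
 */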
125
126 struct ipath_ucontext {
127         struct ib_ucontext ibucontext;
128 };
129
130 static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
131                                                   *ibucontext)
132 {
133         return container_of(ibucontext, struct ipath_ucontext, ibucontext);
134 }
135
136 /*
137  * Translate ib_wr_opcode into ib_wc_opcode.
138  */
139 const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
140         [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
141         [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
142         [IB_WR_SEND] = IB_WC_SEND,
143         [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
144         [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
145         [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
146         [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
147 };
148
149 /*
150  * System image GUID.
151  */
152 static __be64 sys_image_guid;
153
154 /**
155  * ipath_copy_sge - copy data to SGE memory
156  * @ss: the SGE state
157  * @data: the data to copy
158  * @length: the length of the data
159  */
160 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
161 {
162         struct ipath_sge *sge = &ss->sge;
163
164         while (length) {
165                 u32 len = sge->length;
166
167                 if (len > length)
168                         len = length;
169                 if (len > sge->sge_length)
170                         len = sge->sge_length;
171                 BUG_ON(len == 0);
172                 memcpy(sge->vaddr, data, len);
173                 sge->vaddr += len;
174                 sge->length -= len;
175                 sge->sge_length -= len;
176                 if (sge->sge_length == 0) {
177                         if (--ss->num_sge)
178                                 *sge = *ss->sg_list++;
179                 } else if (sge->length == 0 && sge->mr != NULL) {
180                         if (++sge->n >= IPATH_SEGSZ) {
181                                 if (++sge->m >= sge->mr->mapsz)
182                                         break;
183                                 sge->n = 0;
184                         }
185                         sge->vaddr =
186                                 sge->mr->map[sge->m]->segs[sge->n].vaddr;
187                         sge->length =
188                                 sge->mr->map[sge->m]->segs[sge->n].length;
189                 }
190                 data += len;
191                 length -= len;
192         }
193 }
194
195 /**
196  * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
197  * @ss: the SGE state
198  * @length: the number of bytes to skip
199  */
200 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
201 {
202         struct ipath_sge *sge = &ss->sge;
203
204         while (length) {
205                 u32 len = sge->length;
206
207                 if (len > length)
208                         len = length;
209                 if (len > sge->sge_length)
210                         len = sge->sge_length;
211                 BUG_ON(len == 0);
212                 sge->vaddr += len;
213                 sge->length -= len;
214                 sge->sge_length -= len;
215                 if (sge->sge_length == 0) {
216                         if (--ss->num_sge)
217                                 *sge = *ss->sg_list++;
218                 } else if (sge->length == 0 && sge->mr != NULL) {
219                         if (++sge->n >= IPATH_SEGSZ) {
220                                 if (++sge->m >= sge->mr->mapsz)
221                                         break;
222                                 sge->n = 0;
223                         }
224                         sge->vaddr =
225                                 sge->mr->map[sge->m]->segs[sge->n].vaddr;
226                         sge->length =
227                                 sge->mr->map[sge->m]->segs[sge->n].length;
228                 }
229                 length -= len;
230         }
231 }
232
233 static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
234 {
235         struct ib_wc wc;
236
237         memset(&wc, 0, sizeof(wc));
238         wc.wr_id = wr->wr_id;
239         wc.status = IB_WC_WR_FLUSH_ERR;
240         wc.opcode = ib_ipath_wc_opcode[wr->opcode];
241         wc.qp = &qp->ibqp;
242         ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
243 }
244
245 /**
246  * ipath_post_one_send - post one RC, UC, or UD send work request
247  * @qp: the QP to post on
248  * @wr: the work request to send
249  */
250 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
251 {
252         struct ipath_swqe *wqe;
253         u32 next;
254         int i;
255         int j;
256         int acc;
257         int ret;
258         unsigned long flags;
259
260         spin_lock_irqsave(&qp->s_lock, flags);
261
262         /* Check that state is OK to post send. */
263         if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
264                 if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
265                         goto bail_inval;
266                 /* C10-96 says generate a flushed completion entry. */
267                 ipath_flush_wqe(qp, wr);
268                 ret = 0;
269                 goto bail;
270         }
271
272         /* IB spec says that num_sge == 0 is OK. */
273         if (wr->num_sge > qp->s_max_sge)
274                 goto bail_inval;
275
276         /*
277          * Don't allow RDMA reads or atomic operations on UC QPs, and
278          * reject undefined opcodes.
279          * Make sure the buffer is large enough to hold the result for atomics.
280          */
281         if (qp->ibqp.qp_type == IB_QPT_UC) {
282                 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
283                         goto bail_inval;
284         } else if (qp->ibqp.qp_type == IB_QPT_UD) {
285                 /* Check UD opcode */
286                 if (wr->opcode != IB_WR_SEND &&
287                     wr->opcode != IB_WR_SEND_WITH_IMM)
288                         goto bail_inval;
289                 /* Check UD destination address PD */
290                 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
291                         goto bail_inval;
292         } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
293                 goto bail_inval;
294         else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
295                    (wr->num_sge == 0 ||
296                     wr->sg_list[0].length < sizeof(u64) ||
297                     wr->sg_list[0].addr & (sizeof(u64) - 1)))
298                 goto bail_inval;
299         else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
300                 goto bail_inval;
301
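        /*
         * Advance the circular send queue head; the queue is full when the
         * slot after s_head would collide with s_last (one entry is kept
         * unused so a full queue can be told apart from an empty one).
         */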
302         next = qp->s_head + 1;
303         if (next >= qp->s_size)
304                 next = 0;
305         if (next == qp->s_last) {
306                 ret = -ENOMEM;
307                 goto bail;
308         }
309
310         wqe = get_swqe_ptr(qp, qp->s_head);
311         wqe->wr = *wr;
312         wqe->ssn = qp->s_ssn++;
313         wqe->length = 0;
314         if (wr->num_sge) {
315                 acc = wr->opcode >= IB_WR_RDMA_READ ?
316                         IB_ACCESS_LOCAL_WRITE : 0;
317                 for (i = 0, j = 0; i < wr->num_sge; i++) {
318                         u32 length = wr->sg_list[i].length;
319                         int ok;
320
321                         if (length == 0)
322                                 continue;
323                         ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
324                                            &wr->sg_list[i], acc);
325                         if (!ok)
326                                 goto bail_inval;
327                         wqe->length += length;
328                         j++;
329                 }
330                 wqe->wr.num_sge = j;
331         }
332         if (qp->ibqp.qp_type == IB_QPT_UC ||
333             qp->ibqp.qp_type == IB_QPT_RC) {
334                 if (wqe->length > 0x80000000U)
335                         goto bail_inval;
336         } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
337                 goto bail_inval;
338         qp->s_head = next;
339
340         ret = 0;
341         goto bail;
342
343 bail_inval:
344         ret = -EINVAL;
345 bail:
346         spin_unlock_irqrestore(&qp->s_lock, flags);
347         return ret;
348 }
349
350 /**
351  * ipath_post_send - post a send on a QP
352  * @ibqp: the QP to post the send on
353  * @wr: the list of work requests to post
354  * @bad_wr: the first bad WR is put here
355  *
356  * This may be called from interrupt context.
357  */
358 static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
359                            struct ib_send_wr **bad_wr)
360 {
361         struct ipath_qp *qp = to_iqp(ibqp);
362         int err = 0;
363
364         for (; wr; wr = wr->next) {
365                 err = ipath_post_one_send(qp, wr);
366                 if (err) {
367                         *bad_wr = wr;
368                         goto bail;
369                 }
370         }
371
372         /* Try to do the send work in the caller's context. */
373         ipath_do_send((unsigned long) qp);
374
375 bail:
376         return err;
377 }
378
379 /**
380  * ipath_post_receive - post a receive on a QP
381  * @ibqp: the QP to post the receive on
382  * @wr: the WR to post
383  * @bad_wr: the first bad WR is put here
384  *
385  * This may be called from interrupt context.
386  */
387 static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
388                               struct ib_recv_wr **bad_wr)
389 {
390         struct ipath_qp *qp = to_iqp(ibqp);
391         struct ipath_rwq *wq = qp->r_rq.wq;
392         unsigned long flags;
393         int ret;
394
395         /* Check that state is OK to post receive. */
396         if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
397                 *bad_wr = wr;
398                 ret = -EINVAL;
399                 goto bail;
400         }
401
402         for (; wr; wr = wr->next) {
403                 struct ipath_rwqe *wqe;
404                 u32 next;
405                 int i;
406
407                 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
408                         *bad_wr = wr;
409                         ret = -EINVAL;
410                         goto bail;
411                 }
412
413                 spin_lock_irqsave(&qp->r_rq.lock, flags);
414                 next = wq->head + 1;
415                 if (next >= qp->r_rq.size)
416                         next = 0;
417                 if (next == wq->tail) {
418                         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
419                         *bad_wr = wr;
420                         ret = -ENOMEM;
421                         goto bail;
422                 }
423
424                 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
425                 wqe->wr_id = wr->wr_id;
426                 wqe->num_sge = wr->num_sge;
427                 for (i = 0; i < wr->num_sge; i++)
428                         wqe->sg_list[i] = wr->sg_list[i];
429                 /* Make sure queue entry is written before the head index. */
430                 smp_wmb();
431                 wq->head = next;
432                 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
433         }
434         ret = 0;
435
436 bail:
437         return ret;
438 }
439
440 /**
441  * ipath_qp_rcv - process an incoming packet on a QP
442  * @dev: the device the packet came on
443  * @hdr: the packet header
444  * @has_grh: true if the packet has a GRH
445  * @data: the packet data
446  * @tlen: the packet length
447  * @qp: the QP the packet came on
448  *
449  * This is called from ipath_ib_rcv() to process an incoming packet
450  * for the given QP.
451  * Called at interrupt level.
452  */
453 static void ipath_qp_rcv(struct ipath_ibdev *dev,
454                          struct ipath_ib_header *hdr, int has_grh,
455                          void *data, u32 tlen, struct ipath_qp *qp)
456 {
457         /* Check for valid receive state. */
458         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
459                 dev->n_pkt_drops++;
460                 return;
461         }
462
463         switch (qp->ibqp.qp_type) {
464         case IB_QPT_SMI:
465         case IB_QPT_GSI:
466                 if (ib_ipath_disable_sma)
467                         break;
468                 /* FALLTHROUGH */
469         case IB_QPT_UD:
470                 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
471                 break;
472
473         case IB_QPT_RC:
474                 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
475                 break;
476
477         case IB_QPT_UC:
478                 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
479                 break;
480
481         default:
482                 break;
483         }
484 }
485
486 /**
487  * ipath_ib_rcv - process an incoming packet
488  * @dev: the device pointer
489  * @rhdr: the header of the packet
490  * @data: the packet data
491  * @tlen: the packet length
492  *
493  * This is called from ipath_kreceive() to process an incoming packet at
494  * interrupt level. Tlen is the length of the header + data + CRC in bytes.
495  */
496 void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
497                   u32 tlen)
498 {
499         struct ipath_ib_header *hdr = rhdr;
500         struct ipath_other_headers *ohdr;
501         struct ipath_qp *qp;
502         u32 qp_num;
503         int lnh;
504         u8 opcode;
505         u16 lid;
506
507         if (unlikely(dev == NULL))
508                 goto bail;
509
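        /* Smallest valid packet: 8-byte LRH + 12-byte BTH + 4-byte ICRC = 24 bytes. */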
510         if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
511                 dev->rcv_errors++;
512                 goto bail;
513         }
514
515         /* Check for a valid destination LID (see ch. 7.11.1). */
516         lid = be16_to_cpu(hdr->lrh[1]);
517         if (lid < IPATH_MULTICAST_LID_BASE) {
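                /*
                 * The low LMC bits of the DLID are path bits; mask them off
                 * before comparing against the port's base LID.
                 */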
518                 lid &= ~((1 << dev->dd->ipath_lmc) - 1);
519                 if (unlikely(lid != dev->dd->ipath_lid)) {
520                         dev->rcv_errors++;
521                         goto bail;
522                 }
523         }
524
525         /* Check for GRH */
526         lnh = be16_to_cpu(hdr->lrh[0]) & 3;
527         if (lnh == IPATH_LRH_BTH)
528                 ohdr = &hdr->u.oth;
529         else if (lnh == IPATH_LRH_GRH)
530                 ohdr = &hdr->u.l.oth;
531         else {
532                 dev->rcv_errors++;
533                 goto bail;
534         }
535
536         opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
537         dev->opstats[opcode].n_bytes += tlen;
538         dev->opstats[opcode].n_packets++;
539
540         /* Get the destination QP number. */
541         qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
542         if (qp_num == IPATH_MULTICAST_QPN) {
543                 struct ipath_mcast *mcast;
544                 struct ipath_mcast_qp *p;
545
546                 if (lnh != IPATH_LRH_GRH) {
547                         dev->n_pkt_drops++;
548                         goto bail;
549                 }
550                 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
551                 if (mcast == NULL) {
552                         dev->n_pkt_drops++;
553                         goto bail;
554                 }
555                 dev->n_multicast_rcv++;
556                 list_for_each_entry_rcu(p, &mcast->qp_list, list)
557                         ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
558                 /*
559                  * Notify ipath_multicast_detach() if it is waiting for us
560                  * to finish.
561                  */
562                 if (atomic_dec_return(&mcast->refcount) <= 1)
563                         wake_up(&mcast->wait);
564         } else {
565                 qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
566                 if (qp) {
567                         dev->n_unicast_rcv++;
568                         ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
569                                      tlen, qp);
570                         /*
571                          * Notify ipath_destroy_qp() if it is waiting
572                          * for us to finish.
573                          */
574                         if (atomic_dec_and_test(&qp->refcount))
575                                 wake_up(&qp->wait);
576                 } else
577                         dev->n_pkt_drops++;
578         }
579
580 bail:;
581 }
582
583 /**
584  * ipath_ib_timer - verbs timer
585  * @dev: the device pointer
586  *
587  * This is called from ipath_do_rcv_timer() at interrupt level to check for
588  * QPs which need retransmits and to collect performance numbers.
589  */
590 static void ipath_ib_timer(struct ipath_ibdev *dev)
591 {
592         struct ipath_qp *resend = NULL;
593         struct list_head *last;
594         struct ipath_qp *qp;
595         unsigned long flags;
596
597         if (dev == NULL)
598                 return;
599
600         spin_lock_irqsave(&dev->pending_lock, flags);
601         /* Start filling the next pending queue. */
602         if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
603                 dev->pending_index = 0;
604         /* Save any requests still in the new queue; they have timed out. */
605         last = &dev->pending[dev->pending_index];
606         while (!list_empty(last)) {
607                 qp = list_entry(last->next, struct ipath_qp, timerwait);
608                 list_del_init(&qp->timerwait);
609                 qp->timer_next = resend;
610                 resend = qp;
611                 atomic_inc(&qp->refcount);
612         }
613         last = &dev->rnrwait;
614         if (!list_empty(last)) {
615                 qp = list_entry(last->next, struct ipath_qp, timerwait);
616                 if (--qp->s_rnr_timeout == 0) {
617                         do {
618                                 list_del_init(&qp->timerwait);
619                                 tasklet_hi_schedule(&qp->s_task);
620                                 if (list_empty(last))
621                                         break;
622                                 qp = list_entry(last->next, struct ipath_qp,
623                                                 timerwait);
624                         } while (qp->s_rnr_timeout == 0);
625                 }
626         }
627         /*
628          * We should only be in the started state if pma_sample_start != 0
629          */
630         if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
631             --dev->pma_sample_start == 0) {
632                 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
633                 ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
634                                         &dev->ipath_rword,
635                                         &dev->ipath_spkts,
636                                         &dev->ipath_rpkts,
637                                         &dev->ipath_xmit_wait);
638         }
639         if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
640                 if (dev->pma_sample_interval == 0) {
641                         u64 ta, tb, tc, td, te;
642
643                         dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
644                         ipath_snapshot_counters(dev->dd, &ta, &tb,
645                                                 &tc, &td, &te);
646
647                         dev->ipath_sword = ta - dev->ipath_sword;
648                         dev->ipath_rword = tb - dev->ipath_rword;
649                         dev->ipath_spkts = tc - dev->ipath_spkts;
650                         dev->ipath_rpkts = td - dev->ipath_rpkts;
651                         dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
652                 }
653                 else
654                         dev->pma_sample_interval--;
655         }
656         spin_unlock_irqrestore(&dev->pending_lock, flags);
657
658         /* XXX What if timer fires again while this is running? */
659         for (qp = resend; qp != NULL; qp = qp->timer_next) {
660                 struct ib_wc wc;
661
662                 spin_lock_irqsave(&qp->s_lock, flags);
663                 if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
664                         dev->n_timeouts++;
665                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
666                 }
667                 spin_unlock_irqrestore(&qp->s_lock, flags);
668
669                 /* Notify ipath_destroy_qp() if it is waiting. */
670                 if (atomic_dec_and_test(&qp->refcount))
671                         wake_up(&qp->wait);
672         }
673 }
674
675 static void update_sge(struct ipath_sge_state *ss, u32 length)
676 {
677         struct ipath_sge *sge = &ss->sge;
678
679         sge->vaddr += length;
680         sge->length -= length;
681         sge->sge_length -= length;
682         if (sge->sge_length == 0) {
683                 if (--ss->num_sge)
684                         *sge = *ss->sg_list++;
685         } else if (sge->length == 0 && sge->mr != NULL) {
686                 if (++sge->n >= IPATH_SEGSZ) {
687                         if (++sge->m >= sge->mr->mapsz)
688                                 return;
689                         sge->n = 0;
690                 }
691                 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
692                 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
693         }
694 }
695
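/*
 * Helpers for packing unaligned payload bytes into the 32-bit words written
 * to the PIO buffer.  "Upper" refers to the bytes at higher byte offsets
 * within a dword in memory, so the shift direction differs between little-
 * and big-endian builds.  clear_upper_bytes() keeps only 'n' bytes of 'data'
 * and positions them at byte offset 'off' within the word.
 */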
696 #ifdef __LITTLE_ENDIAN
697 static inline u32 get_upper_bits(u32 data, u32 shift)
698 {
699         return data >> shift;
700 }
701
702 static inline u32 set_upper_bits(u32 data, u32 shift)
703 {
704         return data << shift;
705 }
706
707 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
708 {
709         data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
710         data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
711         return data;
712 }
713 #else
714 static inline u32 get_upper_bits(u32 data, u32 shift)
715 {
716         return data << shift;
717 }
718
719 static inline u32 set_upper_bits(u32 data, u32 shift)
720 {
721         return data >> shift;
722 }
723
724 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
725 {
726         data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
727         data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
728         return data;
729 }
730 #endif
731
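/*
 * copy_io - copy a payload into a PIO buffer, 32 bits at a time
 * @piobuf: the memory-mapped PIO buffer to write
 * @ss: SGE state describing the source payload
 * @length: number of payload bytes to copy
 * @flush_wc: flush write-combining around the final (trigger) word
 *
 * Unaligned source addresses are handled by accumulating bytes into a
 * 32-bit word before each write; the final word is held back in 'last'
 * and written separately so it can act as the trigger word.
 */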
732 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
733                     u32 length, unsigned flush_wc)
734 {
735         u32 extra = 0;
736         u32 data = 0;
737         u32 last;
738
739         while (1) {
740                 u32 len = ss->sge.length;
741                 u32 off;
742
743                 if (len > length)
744                         len = length;
745                 if (len > ss->sge.sge_length)
746                         len = ss->sge.sge_length;
747                 BUG_ON(len == 0);
748                 /* If the source address is not aligned, try to align it. */
749                 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
750                 if (off) {
751                         u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
752                                             ~(sizeof(u32) - 1));
753                         u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
754                         u32 y;
755
756                         y = sizeof(u32) - off;
757                         if (len > y)
758                                 len = y;
759                         if (len + extra >= sizeof(u32)) {
760                                 data |= set_upper_bits(v, extra *
761                                                        BITS_PER_BYTE);
762                                 len = sizeof(u32) - extra;
763                                 if (len == length) {
764                                         last = data;
765                                         break;
766                                 }
767                                 __raw_writel(data, piobuf);
768                                 piobuf++;
769                                 extra = 0;
770                                 data = 0;
771                         } else {
772                                 /* Clear unused upper bytes */
773                                 data |= clear_upper_bytes(v, len, extra);
774                                 if (len == length) {
775                                         last = data;
776                                         break;
777                                 }
778                                 extra += len;
779                         }
780                 } else if (extra) {
781                         /* Source address is aligned. */
782                         u32 *addr = (u32 *) ss->sge.vaddr;
783                         int shift = extra * BITS_PER_BYTE;
784                         int ushift = 32 - shift;
785                         u32 l = len;
786
787                         while (l >= sizeof(u32)) {
788                                 u32 v = *addr;
789
790                                 data |= set_upper_bits(v, shift);
791                                 __raw_writel(data, piobuf);
792                                 data = get_upper_bits(v, ushift);
793                                 piobuf++;
794                                 addr++;
795                                 l -= sizeof(u32);
796                         }
797                         /*
798                          * Fewer than a full dword ('l' bytes) is left in this chunk.
799                          */
800                         if (l) {
801                                 u32 v = *addr;
802
803                                 if (l + extra >= sizeof(u32)) {
804                                         data |= set_upper_bits(v, shift);
805                                         len -= l + extra - sizeof(u32);
806                                         if (len == length) {
807                                                 last = data;
808                                                 break;
809                                         }
810                                         __raw_writel(data, piobuf);
811                                         piobuf++;
812                                         extra = 0;
813                                         data = 0;
814                                 } else {
815                                         /* Clear unused upper bytes */
816                                         data |= clear_upper_bytes(v, l,
817                                                                   extra);
818                                         if (len == length) {
819                                                 last = data;
820                                                 break;
821                                         }
822                                         extra += l;
823                                 }
824                         } else if (len == length) {
825                                 last = data;
826                                 break;
827                         }
828                 } else if (len == length) {
829                         u32 w;
830
831                         /*
832                          * Need to round up for the last dword in the
833                          * packet.
834                          */
835                         w = (len + 3) >> 2;
836                         __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
837                         piobuf += w - 1;
838                         last = ((u32 *) ss->sge.vaddr)[w - 1];
839                         break;
840                 } else {
841                         u32 w = len >> 2;
842
843                         __iowrite32_copy(piobuf, ss->sge.vaddr, w);
844                         piobuf += w;
845
846                         extra = len & (sizeof(u32) - 1);
847                         if (extra) {
848                                 u32 v = ((u32 *) ss->sge.vaddr)[w];
849
850                                 /* Clear unused upper bytes */
851                                 data = clear_upper_bytes(v, extra, 0);
852                         }
853                 }
854                 update_sge(ss, len);
855                 length -= len;
856         }
857         /* Update address before sending packet. */
858         update_sge(ss, length);
859         if (flush_wc) {
860                 /* must flush early everything before trigger word */
861                 ipath_flush_wc();
862                 __raw_writel(last, piobuf);
863                 /* be sure trigger word is written */
864                 ipath_flush_wc();
865         } else
866                 __raw_writel(last, piobuf);
867 }
868
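/*
 * ipath_verbs_send_pio - send a packet using programmed I/O
 * @qp: the QP the packet is sent on
 * @hdr: the packet header as 32-bit words
 * @hdrwords: number of 32-bit words in the header
 * @ss: SGE state for the payload (unused when len is 0)
 * @len: payload length in bytes
 * @plen: length in dwords written to the PBC (header + payload + 1)
 * @dwords: payload length in dwords
 */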
869 static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
870                                 struct ipath_sge_state *ss, u32 len,
871                                 u32 plen, u32 dwords)
872 {
873         struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
874         u32 __iomem *piobuf;
875         unsigned flush_wc;
876         int ret;
877
878         piobuf = ipath_getpiobuf(dd, NULL);
879         if (unlikely(piobuf == NULL)) {
880                 ret = -EBUSY;
881                 goto bail;
882         }
883
884         /*
885          * Write len to control qword, no flags.
886          * We have to flush after the PBC for correctness on some cpus,
887          * or the WC buffer can be written out of order.
888          */
889         writeq(plen, piobuf);
890         piobuf += 2;
891
892         flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
893         if (len == 0) {
894                 /*
895                  * If there is just the header portion, must flush before
896                  * writing last word of header for correctness, and after
897                  * the last header word (trigger word).
898                  */
899                 if (flush_wc) {
900                         ipath_flush_wc();
901                         __iowrite32_copy(piobuf, hdr, hdrwords - 1);
902                         ipath_flush_wc();
903                         __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
904                         ipath_flush_wc();
905                 } else
906                         __iowrite32_copy(piobuf, hdr, hdrwords);
907                 goto done;
908         }
909
910         if (flush_wc)
911                 ipath_flush_wc();
912         __iowrite32_copy(piobuf, hdr, hdrwords);
913         piobuf += hdrwords;
914
915         /* The common case is aligned and contained in one segment. */
916         if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
917                    !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
918                 u32 *addr = (u32 *) ss->sge.vaddr;
919
920                 /* Update address before sending packet. */
921                 update_sge(ss, len);
922                 if (flush_wc) {
923                         __iowrite32_copy(piobuf, addr, dwords - 1);
924                         /* must flush early everything before trigger word */
925                         ipath_flush_wc();
926                         __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
927                         /* be sure trigger word is written */
928                         ipath_flush_wc();
929                 } else
930                         __iowrite32_copy(piobuf, addr, dwords);
931                 goto done;
932         }
933         copy_io(piobuf, ss, len, flush_wc);
934 done:
935         if (qp->s_wqe)
936                 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
937         ret = 0;
938 bail:
939         return ret;
940 }
941
942 /**
943  * ipath_verbs_send - send a packet
944  * @qp: the QP to send on
945  * @hdr: the packet header
946  * @hdrwords: the number of 32-bit words in the header
947  * @ss: the SGE to send
948  * @len: the length of the packet in bytes
949  */
950 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
951                      u32 hdrwords, struct ipath_sge_state *ss, u32 len)
952 {
953         struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
954         u32 plen;
955         int ret;
956         u32 dwords = (len + 3) >> 2;
957
958         /*
959          * Calculate the send buffer trigger address.
960          * The +1 accounts for the PBC control dword following the PBC length.
961          */
962         plen = hdrwords + dwords + 1;
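        /*
         * For example, a 40-byte header (10 dwords) and a 256-byte payload
         * (64 dwords) give plen = 10 + 64 + 1 = 75 dwords.
         */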
963
964         /* Drop non-VL15 packets if we are not in the active state */
965         if (!(dd->ipath_flags & IPATH_LINKACTIVE) &&
966             qp->ibqp.qp_type != IB_QPT_SMI) {
967                 if (qp->s_wqe)
968                         ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
969                 ret = 0;
970         } else
971                 ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords,
972                                            ss, len, plen, dwords);
973
974         return ret;
975 }
976
977 int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
978                             u64 *rwords, u64 *spkts, u64 *rpkts,
979                             u64 *xmit_wait)
980 {
981         int ret;
982
983         if (!(dd->ipath_flags & IPATH_INITTED)) {
984                 /* no hardware, freeze, etc. */
985                 ret = -EINVAL;
986                 goto bail;
987         }
988         *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
989         *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
990         *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
991         *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
992         *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
993
994         ret = 0;
995
996 bail:
997         return ret;
998 }
999
1000 /**
1001  * ipath_get_counters - get various chip counters
1002  * @dd: the infinipath device
1003  * @cntrs: counters are placed here
1004  *
1005  * Return the counters needed by recv_pma_get_portcounters().
1006  */
1007 int ipath_get_counters(struct ipath_devdata *dd,
1008                        struct ipath_verbs_counters *cntrs)
1009 {
1010         struct ipath_cregs const *crp = dd->ipath_cregs;
1011         int ret;
1012
1013         if (!(dd->ipath_flags & IPATH_INITTED)) {
1014                 /* no hardware, freeze, etc. */
1015                 ret = -EINVAL;
1016                 goto bail;
1017         }
1018         cntrs->symbol_error_counter =
1019                 ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
1020         cntrs->link_error_recovery_counter =
1021                 ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
1022         /*
1023          * The link downed counter counts when the other side downs the
1024          * connection.  We add in the number of times we downed the link
1025          * due to local link integrity errors to compensate.
1026          */
1027         cntrs->link_downed_counter =
1028                 ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
1029         cntrs->port_rcv_errors =
1030                 ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
1031                 ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
1032                 ipath_snap_cntr(dd, crp->cr_portovflcnt) +
1033                 ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
1034                 ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
1035                 ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
1036                 ipath_snap_cntr(dd, crp->cr_erricrccnt) +
1037                 ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
1038                 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
1039                 ipath_snap_cntr(dd, crp->cr_badformatcnt) +
1040                 dd->ipath_rxfc_unsupvl_errs;
1041         cntrs->port_rcv_remphys_errors =
1042                 ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
1043         cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
1044         cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
1045         cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
1046         cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
1047         cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
1048         cntrs->local_link_integrity_errors =
1049                 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1050                 dd->ipath_lli_errs : dd->ipath_lli_errors;
1051         cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs;
1052
1053         ret = 0;
1054
1055 bail:
1056         return ret;
1057 }
1058
1059 /**
1060  * ipath_ib_piobufavail - callback when a PIO buffer is available
1061  * @dev: the device pointer
1062  *
1063  * This is called from ipath_intr() at interrupt level when a PIO buffer is
1064  * available after ipath_verbs_send() returned an error that no buffers were
1065  * available.  Return 1 if we consumed all the PIO buffers and we still have
1066  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
1067  * return zero).
1068  */
1069 int ipath_ib_piobufavail(struct ipath_ibdev *dev)
1070 {
1071         struct ipath_qp *qp;
1072         unsigned long flags;
1073
1074         if (dev == NULL)
1075                 goto bail;
1076
1077         spin_lock_irqsave(&dev->pending_lock, flags);
1078         while (!list_empty(&dev->piowait)) {
1079                 qp = list_entry(dev->piowait.next, struct ipath_qp,
1080                                 piowait);
1081                 list_del_init(&qp->piowait);
1082                 clear_bit(IPATH_S_BUSY, &qp->s_busy);
1083                 tasklet_hi_schedule(&qp->s_task);
1084         }
1085         spin_unlock_irqrestore(&dev->pending_lock, flags);
1086
1087 bail:
1088         return 0;
1089 }
1090
1091 static int ipath_query_device(struct ib_device *ibdev,
1092                               struct ib_device_attr *props)
1093 {
1094         struct ipath_ibdev *dev = to_idev(ibdev);
1095
1096         memset(props, 0, sizeof(*props));
1097
1098         props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1099                 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1100                 IB_DEVICE_SYS_IMAGE_GUID;
1101         props->page_size_cap = PAGE_SIZE;
1102         props->vendor_id = dev->dd->ipath_vendorid;
1103         props->vendor_part_id = dev->dd->ipath_deviceid;
1104         props->hw_ver = dev->dd->ipath_pcirev;
1105
1106         props->sys_image_guid = dev->sys_image_guid;
1107
1108         props->max_mr_size = ~0ull;
1109         props->max_qp = ib_ipath_max_qps;
1110         props->max_qp_wr = ib_ipath_max_qp_wrs;
1111         props->max_sge = ib_ipath_max_sges;
1112         props->max_cq = ib_ipath_max_cqs;
1113         props->max_ah = ib_ipath_max_ahs;
1114         props->max_cqe = ib_ipath_max_cqes;
1115         props->max_mr = dev->lk_table.max;
1116         props->max_fmr = dev->lk_table.max;
1117         props->max_map_per_fmr = 32767;
1118         props->max_pd = ib_ipath_max_pds;
1119         props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
1120         props->max_qp_init_rd_atom = 255;
1121         /* props->max_res_rd_atom */
1122         props->max_srq = ib_ipath_max_srqs;
1123         props->max_srq_wr = ib_ipath_max_srq_wrs;
1124         props->max_srq_sge = ib_ipath_max_srq_sges;
1125         /* props->local_ca_ack_delay */
1126         props->atomic_cap = IB_ATOMIC_GLOB;
1127         props->max_pkeys = ipath_get_npkeys(dev->dd);
1128         props->max_mcast_grp = ib_ipath_max_mcast_grps;
1129         props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
1130         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1131                 props->max_mcast_grp;
1132
1133         return 0;
1134 }
1135
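/*
 * Map the chip's link training state onto the IB PortInfo PortPhysicalState
 * values: 1 = Sleep, 2 = Polling, 3 = Disabled,
 * 4 = PortConfigurationTraining, 5 = LinkUp, 6 = LinkErrorRecovery.
 */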
1136 const u8 ipath_cvt_physportstate[16] = {
1137         [INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
1138         [INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
1139         [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
1140         [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
1141         [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
1142         [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
1143         [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
1144         [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
1145         [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
1146         [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
1147         [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
1148         [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
1149         [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
1150 };
1151
1152 u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
1153 {
1154         return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
1155 }
1156
1157 static int ipath_query_port(struct ib_device *ibdev,
1158                             u8 port, struct ib_port_attr *props)
1159 {
1160         struct ipath_ibdev *dev = to_idev(ibdev);
1161         struct ipath_devdata *dd = dev->dd;
1162         enum ib_mtu mtu;
1163         u16 lid = dd->ipath_lid;
1164         u64 ibcstat;
1165
1166         memset(props, 0, sizeof(*props));
1167         props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
1168         props->lmc = dd->ipath_lmc;
1169         props->sm_lid = dev->sm_lid;
1170         props->sm_sl = dev->sm_sl;
1171         ibcstat = dd->ipath_lastibcstat;
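        /*
         * Bits 5:4 of ibcstat hold the chip's logical link state; adding 1
         * presumably maps it onto the IB PortState encoding
         * (1 = Down ... 4 = Active).
         */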
1172         props->state = ((ibcstat >> 4) & 0x3) + 1;
1173         /* See phys_state_show() */
1174         props->phys_state = ipath_cvt_physportstate[
1175                 dd->ipath_lastibcstat & 0xf];
1176         props->port_cap_flags = dev->port_cap_flags;
1177         props->gid_tbl_len = 1;
1178         props->max_msg_sz = 0x80000000;
1179         props->pkey_tbl_len = ipath_get_npkeys(dd);
1180         props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
1181                 dev->z_pkey_violations;
1182         props->qkey_viol_cntr = dev->qkey_violations;
1183         props->active_width = IB_WIDTH_4X;
1184         /* See rate_show() */
1185         props->active_speed = 1;        /* SDR: 2.5 Gb/s per lane, 10 Gb/s at 4X. */
1186         props->max_vl_num = 1;          /* VLCap = VL0 */
1187         props->init_type_reply = 0;
1188
1189         /*
1190          * Note: the chip supports a maximum MTU of 4096, but the driver
1191          * hasn't implemented this feature yet, so set the maximum value
1192          * to 2048.
1193          */
1194         props->max_mtu = IB_MTU_2048;
1195         switch (dd->ipath_ibmtu) {
1196         case 4096:
1197                 mtu = IB_MTU_4096;
1198                 break;
1199         case 2048:
1200                 mtu = IB_MTU_2048;
1201                 break;
1202         case 1024:
1203                 mtu = IB_MTU_1024;
1204                 break;
1205         case 512:
1206                 mtu = IB_MTU_512;
1207                 break;
1208         case 256:
1209                 mtu = IB_MTU_256;
1210                 break;
1211         default:
1212                 mtu = IB_MTU_2048;
1213         }
1214         props->active_mtu = mtu;
1215         props->subnet_timeout = dev->subnet_timeout;
1216
1217         return 0;
1218 }
1219
1220 static int ipath_modify_device(struct ib_device *device,
1221                                int device_modify_mask,
1222                                struct ib_device_modify *device_modify)
1223 {
1224         int ret;
1225
1226         if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1227                                    IB_DEVICE_MODIFY_NODE_DESC)) {
1228                 ret = -EOPNOTSUPP;
1229                 goto bail;
1230         }
1231
1232         if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
1233                 memcpy(device->node_desc, device_modify->node_desc, 64);
1234
1235         if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
1236                 to_idev(device)->sys_image_guid =
1237                         cpu_to_be64(device_modify->sys_image_guid);
1238
1239         ret = 0;
1240
1241 bail:
1242         return ret;
1243 }
1244
1245 static int ipath_modify_port(struct ib_device *ibdev,
1246                              u8 port, int port_modify_mask,
1247                              struct ib_port_modify *props)
1248 {
1249         struct ipath_ibdev *dev = to_idev(ibdev);
1250
1251         dev->port_cap_flags |= props->set_port_cap_mask;
1252         dev->port_cap_flags &= ~props->clr_port_cap_mask;
1253         if (port_modify_mask & IB_PORT_SHUTDOWN)
1254                 ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
1255         if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1256                 dev->qkey_violations = 0;
1257         return 0;
1258 }
1259
1260 static int ipath_query_gid(struct ib_device *ibdev, u8 port,
1261                            int index, union ib_gid *gid)
1262 {
1263         struct ipath_ibdev *dev = to_idev(ibdev);
1264         int ret;
1265
1266         if (index >= 1) {
1267                 ret = -EINVAL;
1268                 goto bail;
1269         }
1270         gid->global.subnet_prefix = dev->gid_prefix;
1271         gid->global.interface_id = dev->dd->ipath_guid;
1272
1273         ret = 0;
1274
1275 bail:
1276         return ret;
1277 }
1278
1279 static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
1280                                     struct ib_ucontext *context,
1281                                     struct ib_udata *udata)
1282 {
1283         struct ipath_ibdev *dev = to_idev(ibdev);
1284         struct ipath_pd *pd;
1285         struct ib_pd *ret;
1286
1287         /*
1288          * This is actually totally arbitrary.  Some correctness tests
1289          * assume there's a maximum number of PDs that can be allocated.
1290          * We don't actually have this limit, but we fail the test if
1291          * we allow allocations of more than we report for this value.
1292          */
1293
1294         pd = kmalloc(sizeof *pd, GFP_KERNEL);
1295         if (!pd) {
1296                 ret = ERR_PTR(-ENOMEM);
1297                 goto bail;
1298         }
1299
1300         spin_lock(&dev->n_pds_lock);
1301         if (dev->n_pds_allocated == ib_ipath_max_pds) {
1302                 spin_unlock(&dev->n_pds_lock);
1303                 kfree(pd);
1304                 ret = ERR_PTR(-ENOMEM);
1305                 goto bail;
1306         }
1307
1308         dev->n_pds_allocated++;
1309         spin_unlock(&dev->n_pds_lock);
1310
1311         /* ib_alloc_pd() will initialize pd->ibpd. */
1312         pd->user = udata != NULL;
1313
1314         ret = &pd->ibpd;
1315
1316 bail:
1317         return ret;
1318 }
1319
1320 static int ipath_dealloc_pd(struct ib_pd *ibpd)
1321 {
1322         struct ipath_pd *pd = to_ipd(ibpd);
1323         struct ipath_ibdev *dev = to_idev(ibpd->device);
1324
1325         spin_lock(&dev->n_pds_lock);
1326         dev->n_pds_allocated--;
1327         spin_unlock(&dev->n_pds_lock);
1328
1329         kfree(pd);
1330
1331         return 0;
1332 }
1333
1334 /**
1335  * ipath_create_ah - create an address handle
1336  * @pd: the protection domain
1337  * @ah_attr: the attributes of the AH
1338  *
1339  * This may be called from interrupt context.
1340  */
1341 static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
1342                                      struct ib_ah_attr *ah_attr)
1343 {
1344         struct ipath_ah *ah;
1345         struct ib_ah *ret;
1346         struct ipath_ibdev *dev = to_idev(pd->device);
1347         unsigned long flags;
1348
1349         /* A multicast address requires a GRH (see ch. 8.4.1). */
1350         if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
1351             ah_attr->dlid != IPATH_PERMISSIVE_LID &&
1352             !(ah_attr->ah_flags & IB_AH_GRH)) {
1353                 ret = ERR_PTR(-EINVAL);
1354                 goto bail;
1355         }
1356
1357         if (ah_attr->dlid == 0) {
1358                 ret = ERR_PTR(-EINVAL);
1359                 goto bail;
1360         }
1361
1362         if (ah_attr->port_num < 1 ||
1363             ah_attr->port_num > pd->device->phys_port_cnt) {
1364                 ret = ERR_PTR(-EINVAL);
1365                 goto bail;
1366         }
1367
1368         ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1369         if (!ah) {
1370                 ret = ERR_PTR(-ENOMEM);
1371                 goto bail;
1372         }
1373
1374         spin_lock_irqsave(&dev->n_ahs_lock, flags);
1375         if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
1376                 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1377                 kfree(ah);
1378                 ret = ERR_PTR(-ENOMEM);
1379                 goto bail;
1380         }
1381
1382         dev->n_ahs_allocated++;
1383         spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1384
1385         /* ib_create_ah() will initialize ah->ibah. */
1386         ah->attr = *ah_attr;
1387
1388         ret = &ah->ibah;
1389
1390 bail:
1391         return ret;
1392 }
1393
1394 /**
1395  * ipath_destroy_ah - destroy an address handle
1396  * @ibah: the AH to destroy
1397  *
1398  * This may be called from interrupt context.
1399  */
1400 static int ipath_destroy_ah(struct ib_ah *ibah)
1401 {
1402         struct ipath_ibdev *dev = to_idev(ibah->device);
1403         struct ipath_ah *ah = to_iah(ibah);
1404         unsigned long flags;
1405
1406         spin_lock_irqsave(&dev->n_ahs_lock, flags);
1407         dev->n_ahs_allocated--;
1408         spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1409
1410         kfree(ah);
1411
1412         return 0;
1413 }
1414
1415 static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1416 {
1417         struct ipath_ah *ah = to_iah(ibah);
1418
1419         *ah_attr = ah->attr;
1420
1421         return 0;
1422 }
1423
1424 /**
1425  * ipath_get_npkeys - return the size of the PKEY table for port 0
1426  * @dd: the infinipath device
1427  */
1428 unsigned ipath_get_npkeys(struct ipath_devdata *dd)
1429 {
1430         return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1431 }
1432
1433 /**
1434  * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
1435  * @dd: the infinipath device
1436  * @index: the PKEY index
1437  */
1438 unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
1439 {
1440         unsigned ret;
1441
1442         if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1443                 ret = 0;
1444         else
1445                 ret = dd->ipath_pd[0]->port_pkeys[index];
1446
1447         return ret;
1448 }
1449
1450 static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1451                             u16 *pkey)
1452 {
1453         struct ipath_ibdev *dev = to_idev(ibdev);
1454         int ret;
1455
1456         if (index >= ipath_get_npkeys(dev->dd)) {
1457                 ret = -EINVAL;
1458                 goto bail;
1459         }
1460
1461         *pkey = ipath_get_pkey(dev->dd, index);
1462         ret = 0;
1463
1464 bail:
1465         return ret;
1466 }
1467
1468 /**
1469  * ipath_alloc_ucontext - allocate a ucontext
1470  * @ibdev: the infiniband device
1471  * @udata: not used by the InfiniPath driver
1472  */
1473
1474 static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
1475                                                 struct ib_udata *udata)
1476 {
1477         struct ipath_ucontext *context;
1478         struct ib_ucontext *ret;
1479
1480         context = kmalloc(sizeof *context, GFP_KERNEL);
1481         if (!context) {
1482                 ret = ERR_PTR(-ENOMEM);
1483                 goto bail;
1484         }
1485
1486         ret = &context->ibucontext;
1487
1488 bail:
1489         return ret;
1490 }
1491
1492 static int ipath_dealloc_ucontext(struct ib_ucontext *context)
1493 {
1494         kfree(to_iucontext(context));
1495         return 0;
1496 }
1497
1498 static int ipath_verbs_register_sysfs(struct ib_device *dev);
1499
1500 static void __verbs_timer(unsigned long arg)
1501 {
1502         struct ipath_devdata *dd = (struct ipath_devdata *) arg;
1503
1504         /* Handle verbs layer timeouts. */
1505         ipath_ib_timer(dd->verbs_dev);
1506
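             /* Re-arm the timer so it runs again on the next jiffy. */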
1507         mod_timer(&dd->verbs_timer, jiffies + 1);
1508 }
1509
1510 static int enable_timer(struct ipath_devdata *dd)
1511 {
1512         /*
1513          * Early chips had a design flaw where the chip's and the kernel's
1514          * ideas of the tail register don't always agree, and therefore we
1515          * won't get an interrupt on the next packet received.
1516          * If the board supports per-packet receive interrupts, use them.
1517          * Otherwise, the timer function periodically checks for packets
1518          * to cover this case.
1519          * Either way, the timer is needed for verbs layer related
1520          * processing.
1521          */
1522         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1523                 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1524                                  0x2074076542310ULL);
1525                 /* Enable GPIO bit 2 interrupt */
1526                 dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
1527                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1528                                  dd->ipath_gpio_mask);
1529         }
1530
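             /* Start the per-device verbs timer, which fires every jiffy. */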
1531         init_timer(&dd->verbs_timer);
1532         dd->verbs_timer.function = __verbs_timer;
1533         dd->verbs_timer.data = (unsigned long)dd;
1534         dd->verbs_timer.expires = jiffies + 1;
1535         add_timer(&dd->verbs_timer);
1536
1537         return 0;
1538 }
1539
1540 static int disable_timer(struct ipath_devdata *dd)
1541 {
1542         /* If enable_timer() turned on GPIO receive interrupts, undo that. */
1543         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1544                 /* Disable GPIO bit 2 interrupt */
1545                 dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
1546                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1547                                  dd->ipath_gpio_mask);
1548                 /*
1549                  * We might want to undo changes to debugportselect,
1550                  * but how?
1551                  */
1552         }
1553
1554         del_timer_sync(&dd->verbs_timer);
1555
1556         return 0;
1557 }
1558
1559 /**
1560  * ipath_register_ib_device - register our device with the infiniband core
1561  * @dd: the device data structure
1562  * Return 0 if successful, a negative errno otherwise.
1563  */
1564 int ipath_register_ib_device(struct ipath_devdata *dd)
1565 {
1566         struct ipath_verbs_counters cntrs;
1567         struct ipath_ibdev *idev;
1568         struct ib_device *dev;
1569         int ret;
1570
1571         idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
1572         if (idev == NULL) {
1573                 ret = -ENOMEM;
1574                 goto bail;
1575         }
1576
1577         dev = &idev->ibdev;
1578
1579         /* Only need to initialize non-zero fields. */
1580         spin_lock_init(&idev->n_pds_lock);
1581         spin_lock_init(&idev->n_ahs_lock);
1582         spin_lock_init(&idev->n_cqs_lock);
1583         spin_lock_init(&idev->n_qps_lock);
1584         spin_lock_init(&idev->n_srqs_lock);
1585         spin_lock_init(&idev->n_mcast_grps_lock);
1586
1587         spin_lock_init(&idev->qp_table.lock);
1588         spin_lock_init(&idev->lk_table.lock);
1589         idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
1590         /* Set the prefix to the default value (see ch. 4.1.1) */
1591         idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
1592
1593         ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
1594         if (ret)
1595                 goto err_qp;
1596
1597         /*
1598          * The top ib_ipath_lkey_table_size bits are used to index the
1599          * table.  The lower 8 bits can be owned by the user (copied from
1600          * the LKEY).  The remaining bits act as a generation number or tag.
1601          */
1602         idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
1603         idev->lk_table.table = kzalloc(idev->lk_table.max *
1604                                        sizeof(*idev->lk_table.table),
1605                                        GFP_KERNEL);
1606         if (idev->lk_table.table == NULL) {
1607                 ret = -ENOMEM;
1608                 goto err_lk;
1609         }
1610         INIT_LIST_HEAD(&idev->pending_mmaps);
1611         spin_lock_init(&idev->pending_lock);
1612         idev->mmap_offset = PAGE_SIZE;
1613         spin_lock_init(&idev->mmap_offset_lock);
1614         INIT_LIST_HEAD(&idev->pending[0]);
1615         INIT_LIST_HEAD(&idev->pending[1]);
1616         INIT_LIST_HEAD(&idev->pending[2]);
1617         INIT_LIST_HEAD(&idev->piowait);
1618         INIT_LIST_HEAD(&idev->rnrwait);
1619         idev->pending_index = 0;
1620         idev->port_cap_flags =
1621                 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
1622         idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1623         idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1624         idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1625         idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1626         idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1627         idev->link_width_enabled = 3;   /* 1x or 4x */
1628
1629         /* Snapshot current HW counters to "clear" them. */
1630         ipath_get_counters(dd, &cntrs);
1631         idev->z_symbol_error_counter = cntrs.symbol_error_counter;
1632         idev->z_link_error_recovery_counter =
1633                 cntrs.link_error_recovery_counter;
1634         idev->z_link_downed_counter = cntrs.link_downed_counter;
1635         idev->z_port_rcv_errors = cntrs.port_rcv_errors;
1636         idev->z_port_rcv_remphys_errors =
1637                 cntrs.port_rcv_remphys_errors;
1638         idev->z_port_xmit_discards = cntrs.port_xmit_discards;
1639         idev->z_port_xmit_data = cntrs.port_xmit_data;
1640         idev->z_port_rcv_data = cntrs.port_rcv_data;
1641         idev->z_port_xmit_packets = cntrs.port_xmit_packets;
1642         idev->z_port_rcv_packets = cntrs.port_rcv_packets;
1643         idev->z_local_link_integrity_errors =
1644                 cntrs.local_link_integrity_errors;
1645         idev->z_excessive_buffer_overrun_errors =
1646                 cntrs.excessive_buffer_overrun_errors;
1647         idev->z_vl15_dropped = cntrs.vl15_dropped;
1648
1649         /*
1650          * The system image GUID is supposed to be the same for all
1651          * IB HCAs in a single system, but since there can be other
1652          * device types in the system, we can't be sure this is unique.
1653          */
1654         if (!sys_image_guid)
1655                 sys_image_guid = dd->ipath_guid;
1656         idev->sys_image_guid = sys_image_guid;
1657         idev->ib_unit = dd->ipath_unit;
1658         idev->dd = dd;
1659
1660         strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
1661         dev->owner = THIS_MODULE;
1662         dev->node_guid = dd->ipath_guid;
1663         dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
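             /* Advertise the userspace verbs commands this driver supports. */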
1664         dev->uverbs_cmd_mask =
1665                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
1666                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
1667                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
1668                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
1669                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
1670                 (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
1671                 (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
1672                 (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
1673                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
1674                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
1675                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1676                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
1677                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
1678                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
1679                 (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
1680                 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
1681                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
1682                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
1683                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
1684                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
1685                 (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
1686                 (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
1687                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
1688                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
1689                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
1690                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
1691                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
1692                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
1693                 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
1694         dev->node_type = RDMA_NODE_IB_CA;
1695         dev->phys_port_cnt = 1;
1696         dev->num_comp_vectors = 1;
1697         dev->dma_device = &dd->pcidev->dev;
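             /* Wire up the verbs entry points implemented by this driver. */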
1698         dev->query_device = ipath_query_device;
1699         dev->modify_device = ipath_modify_device;
1700         dev->query_port = ipath_query_port;
1701         dev->modify_port = ipath_modify_port;
1702         dev->query_pkey = ipath_query_pkey;
1703         dev->query_gid = ipath_query_gid;
1704         dev->alloc_ucontext = ipath_alloc_ucontext;
1705         dev->dealloc_ucontext = ipath_dealloc_ucontext;
1706         dev->alloc_pd = ipath_alloc_pd;
1707         dev->dealloc_pd = ipath_dealloc_pd;
1708         dev->create_ah = ipath_create_ah;
1709         dev->destroy_ah = ipath_destroy_ah;
1710         dev->query_ah = ipath_query_ah;
1711         dev->create_srq = ipath_create_srq;
1712         dev->modify_srq = ipath_modify_srq;
1713         dev->query_srq = ipath_query_srq;
1714         dev->destroy_srq = ipath_destroy_srq;
1715         dev->create_qp = ipath_create_qp;
1716         dev->modify_qp = ipath_modify_qp;
1717         dev->query_qp = ipath_query_qp;
1718         dev->destroy_qp = ipath_destroy_qp;
1719         dev->post_send = ipath_post_send;
1720         dev->post_recv = ipath_post_receive;
1721         dev->post_srq_recv = ipath_post_srq_receive;
1722         dev->create_cq = ipath_create_cq;
1723         dev->destroy_cq = ipath_destroy_cq;
1724         dev->resize_cq = ipath_resize_cq;
1725         dev->poll_cq = ipath_poll_cq;
1726         dev->req_notify_cq = ipath_req_notify_cq;
1727         dev->get_dma_mr = ipath_get_dma_mr;
1728         dev->reg_phys_mr = ipath_reg_phys_mr;
1729         dev->reg_user_mr = ipath_reg_user_mr;
1730         dev->dereg_mr = ipath_dereg_mr;
1731         dev->alloc_fmr = ipath_alloc_fmr;
1732         dev->map_phys_fmr = ipath_map_phys_fmr;
1733         dev->unmap_fmr = ipath_unmap_fmr;
1734         dev->dealloc_fmr = ipath_dealloc_fmr;
1735         dev->attach_mcast = ipath_multicast_attach;
1736         dev->detach_mcast = ipath_multicast_detach;
1737         dev->process_mad = ipath_process_mad;
1738         dev->mmap = ipath_mmap;
1739         dev->dma_ops = &ipath_dma_mapping_ops;
1740
1741         snprintf(dev->node_desc, sizeof(dev->node_desc),
1742                  IPATH_IDSTR " %s", init_utsname()->nodename);
1743
1744         ret = ib_register_device(dev);
1745         if (ret)
1746                 goto err_reg;
1747
1748         if (ipath_verbs_register_sysfs(dev))
1749                 goto err_class;
1750
1751         enable_timer(dd);
1752
1753         goto bail;
1754
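             /* Error unwind: undo the steps above in reverse order. */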
1755 err_class:
1756         ib_unregister_device(dev);
1757 err_reg:
1758         kfree(idev->lk_table.table);
1759 err_lk:
1760         kfree(idev->qp_table.table);
1761 err_qp:
1762         ib_dealloc_device(dev);
1763         ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1764         idev = NULL;
1765
1766 bail:
1767         dd->verbs_dev = idev;
1768         return ret;
1769 }
1770
1771 void ipath_unregister_ib_device(struct ipath_ibdev *dev)
1772 {
1773         struct ib_device *ibdev = &dev->ibdev;
1774
1775         disable_timer(dev->dd);
1776
1777         ib_unregister_device(ibdev);
1778
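             /* These lists should be empty by now; warn about anything left queued. */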
1779         if (!list_empty(&dev->pending[0]) ||
1780             !list_empty(&dev->pending[1]) ||
1781             !list_empty(&dev->pending[2]))
1782                 ipath_dev_err(dev->dd, "pending list not empty!\n");
1783         if (!list_empty(&dev->piowait))
1784                 ipath_dev_err(dev->dd, "piowait list not empty!\n");
1785         if (!list_empty(&dev->rnrwait))
1786                 ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
1787         if (!ipath_mcast_tree_empty())
1788                 ipath_dev_err(dev->dd, "multicast table memory leak!\n");
1789         /*
1790          * Note that ipath_unregister_ib_device() can be called before all
1791          * the QPs are destroyed!
1792          */
1793         ipath_free_all_qps(&dev->qp_table);
1794         kfree(dev->qp_table.table);
1795         kfree(dev->lk_table.table);
1796         ib_dealloc_device(ibdev);
1797 }
1798
1799 static ssize_t show_rev(struct class_device *cdev, char *buf)
1800 {
1801         struct ipath_ibdev *dev =
1802                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1803
1804         return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
1805 }
1806
1807 static ssize_t show_hca(struct class_device *cdev, char *buf)
1808 {
1809         struct ipath_ibdev *dev =
1810                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1811         int ret;
1812
1813         ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
1814         if (ret < 0)
1815                 goto bail;
1816         strcat(buf, "\n");
1817         ret = strlen(buf);
1818
1819 bail:
1820         return ret;
1821 }
1822
1823 static ssize_t show_stats(struct class_device *cdev, char *buf)
1824 {
1825         struct ipath_ibdev *dev =
1826                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1827         int i;
1828         int len;
1829
1830         len = sprintf(buf,
1831                       "RC resends  %d\n"
1832                       "RC no QACK  %d\n"
1833                       "RC ACKs     %d\n"
1834                       "RC SEQ NAKs %d\n"
1835                       "RC RDMA seq %d\n"
1836                       "RC RNR NAKs %d\n"
1837                       "RC OTH NAKs %d\n"
1838                       "RC timeouts %d\n"
1839                       "RC RDMA dup %d\n"
1840                       "RC stalls   %d\n"
1841                       "piobuf wait %d\n"
1842                       "no piobuf   %d\n"
1843                       "PKT drops   %d\n"
1844                       "WQE errs    %d\n",
1845                       dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1846                       dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1847                       dev->n_other_naks, dev->n_timeouts,
1848                       dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
1849                       dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
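             /* Append per-opcode packet and byte counts, skipping opcodes never seen. */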
1850         for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1851                 const struct ipath_opcode_stats *si = &dev->opstats[i];
1852
1853                 if (!si->n_packets && !si->n_bytes)
1854                         continue;
1855                 len += sprintf(buf + len, "%02x %llu/%llu\n", i,
1856                                (unsigned long long) si->n_packets,
1857                                (unsigned long long) si->n_bytes);
1858         }
1859         return len;
1860 }
1861
1862 static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1863 static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1864 static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
1865 static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
1866
1867 static struct class_device_attribute *ipath_class_attributes[] = {
1868         &class_device_attr_hw_rev,
1869         &class_device_attr_hca_type,
1870         &class_device_attr_board_id,
1871         &class_device_attr_stats
1872 };
1873
1874 static int ipath_verbs_register_sysfs(struct ib_device *dev)
1875 {
1876         int i;
1877         int ret;
1878
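             /* Create one sysfs file per attribute; give up on the first failure. */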
1879         for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
1880                 if (class_device_create_file(&dev->class_dev,
1881                                              ipath_class_attributes[i])) {
1882                         ret = 1;
1883                         goto bail;
1884                 }
1885
1886         ret = 0;
1887
1888 bail:
1889         return ret;
1890 }