drivers/infiniband/hw/ipath/ipath_verbs.c
1 /*
2  * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <rdma/ib_mad.h>
35 #include <rdma/ib_user_verbs.h>
36 #include <linux/io.h>
37 #include <linux/utsname.h>
38
39 #include "ipath_kernel.h"
40 #include "ipath_verbs.h"
41 #include "ipath_common.h"
42
43 static unsigned int ib_ipath_qp_table_size = 251;
44 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
45 MODULE_PARM_DESC(qp_table_size, "QP table size");
46
47 unsigned int ib_ipath_lkey_table_size = 12;
48 module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
49                    S_IRUGO);
50 MODULE_PARM_DESC(lkey_table_size,
51                  "LKEY table size in bits (2^n, 1 <= n <= 23)");
52
53 static unsigned int ib_ipath_max_pds = 0xFFFF;
54 module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
55 MODULE_PARM_DESC(max_pds,
56                  "Maximum number of protection domains to support");
57
58 static unsigned int ib_ipath_max_ahs = 0xFFFF;
59 module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
60 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
61
62 unsigned int ib_ipath_max_cqes = 0x2FFFF;
63 module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
64 MODULE_PARM_DESC(max_cqes,
65                  "Maximum number of completion queue entries to support");
66
67 unsigned int ib_ipath_max_cqs = 0x1FFFF;
68 module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
69 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
70
71 unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
72 module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
73                    S_IWUSR | S_IRUGO);
74 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
75
76 unsigned int ib_ipath_max_qps = 16384;
77 module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
78 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
79
80 unsigned int ib_ipath_max_sges = 0x60;
81 module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
82 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
83
84 unsigned int ib_ipath_max_mcast_grps = 16384;
85 module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
86                    S_IWUSR | S_IRUGO);
87 MODULE_PARM_DESC(max_mcast_grps,
88                  "Maximum number of multicast groups to support");
89
90 unsigned int ib_ipath_max_mcast_qp_attached = 16;
91 module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
92                    uint, S_IWUSR | S_IRUGO);
93 MODULE_PARM_DESC(max_mcast_qp_attached,
94                  "Maximum number of attached QPs to support");
95
96 unsigned int ib_ipath_max_srqs = 1024;
97 module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
98 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
99
100 unsigned int ib_ipath_max_srq_sges = 128;
101 module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
102                    uint, S_IWUSR | S_IRUGO);
103 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
104
105 unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
106 module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
107                    uint, S_IWUSR | S_IRUGO);
108 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
109
110 static unsigned int ib_ipath_disable_sma;
111 module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
112 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
113
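/*
 * Illustrative usage note (not part of the original source; the module name
 * ib_ipath is assumed): the tunables above are ordinary module parameters,
 * so a load line such as
 *
 *     modprobe ib_ipath lkey_table_size=16 max_qps=8192
 *
 * would request a 2^16-entry LKEY table and cap the QP count at 8192.
 * Parameters declared with S_IWUSR can also be changed after loading via
 * /sys/module/ib_ipath/parameters/<name>.
 */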
114 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
115         [IB_QPS_RESET] = 0,
116         [IB_QPS_INIT] = IPATH_POST_RECV_OK,
117         [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
118         [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
119             IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
120         [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
121             IPATH_POST_SEND_OK,
122         [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
123         [IB_QPS_ERR] = 0,
124 };
125
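/*
 * Reading the table above (descriptive note): ib_ipath_state_ops is indexed
 * by the current QP state and yields a bitmask of what is permitted in that
 * state.  Checks later in this file therefore look like:
 *
 *     if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))
 *             goto bail_inval;        // sends not allowed in this state
 *
 * For example, RTR permits posting and processing receives but not sends,
 * while RTS permits all four operations.
 */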
126 struct ipath_ucontext {
127         struct ib_ucontext ibucontext;
128 };
129
130 static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
131                                                   *ibucontext)
132 {
133         return container_of(ibucontext, struct ipath_ucontext, ibucontext);
134 }
135
136 /*
137  * Translate ib_wr_opcode into ib_wc_opcode.
138  */
139 const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
140         [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
141         [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
142         [IB_WR_SEND] = IB_WC_SEND,
143         [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
144         [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
145         [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
146         [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
147 };
148
149 /*
150  * System image GUID.
151  */
152 static __be64 sys_image_guid;
153
154 /**
155  * ipath_copy_sge - copy data to SGE memory
156  * @ss: the SGE state
157  * @data: the data to copy
158  * @length: the length of the data
159  */
160 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
161 {
162         struct ipath_sge *sge = &ss->sge;
163
164         while (length) {
165                 u32 len = sge->length;
166
167                 if (len > length)
168                         len = length;
169                 if (len > sge->sge_length)
170                         len = sge->sge_length;
171                 BUG_ON(len == 0);
172                 memcpy(sge->vaddr, data, len);
173                 sge->vaddr += len;
174                 sge->length -= len;
175                 sge->sge_length -= len;
176                 if (sge->sge_length == 0) {
177                         if (--ss->num_sge)
178                                 *sge = *ss->sg_list++;
179                 } else if (sge->length == 0 && sge->mr != NULL) {
180                         if (++sge->n >= IPATH_SEGSZ) {
181                                 if (++sge->m >= sge->mr->mapsz)
182                                         break;
183                                 sge->n = 0;
184                         }
185                         sge->vaddr =
186                                 sge->mr->map[sge->m]->segs[sge->n].vaddr;
187                         sge->length =
188                                 sge->mr->map[sge->m]->segs[sge->n].length;
189                 }
190                 data += len;
191                 length -= len;
192         }
193 }
194
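/*
 * Minimal caller sketch for ipath_copy_sge()/ipath_skip_sge() (illustrative
 * only; the r_sge field and the sizes are just examples): a receive path
 * that wants to drop a 40-byte GRH and then land the payload would do
 * roughly
 *
 *     ipath_skip_sge(&qp->r_sge, 40);
 *     ipath_copy_sge(&qp->r_sge, payload, payload_len);
 *
 * Both walkers consume the current SGE, advance to the next sg_list entry
 * when sge_length hits zero, and re-seed vaddr/length from the memory
 * region's segment map when a registered segment is exhausted.
 */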
195 /**
196  * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
197  * @ss: the SGE state
198  * @length: the number of bytes to skip
199  */
200 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
201 {
202         struct ipath_sge *sge = &ss->sge;
203
204         while (length) {
205                 u32 len = sge->length;
206
207                 if (len > length)
208                         len = length;
209                 if (len > sge->sge_length)
210                         len = sge->sge_length;
211                 BUG_ON(len == 0);
212                 sge->vaddr += len;
213                 sge->length -= len;
214                 sge->sge_length -= len;
215                 if (sge->sge_length == 0) {
216                         if (--ss->num_sge)
217                                 *sge = *ss->sg_list++;
218                 } else if (sge->length == 0 && sge->mr != NULL) {
219                         if (++sge->n >= IPATH_SEGSZ) {
220                                 if (++sge->m >= sge->mr->mapsz)
221                                         break;
222                                 sge->n = 0;
223                         }
224                         sge->vaddr =
225                                 sge->mr->map[sge->m]->segs[sge->n].vaddr;
226                         sge->length =
227                                 sge->mr->map[sge->m]->segs[sge->n].length;
228                 }
229                 length -= len;
230         }
231 }
232
233 static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
234 {
235         struct ib_wc wc;
236
237         memset(&wc, 0, sizeof(wc));
238         wc.wr_id = wr->wr_id;
239         wc.status = IB_WC_WR_FLUSH_ERR;
240         wc.opcode = ib_ipath_wc_opcode[wr->opcode];
241         wc.qp = &qp->ibqp;
242         ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
243 }
244
245 /**
246  * ipath_post_one_send - post one RC, UC, or UD send work request
247  * @qp: the QP to post on
248  * @wr: the work request to send
249  */
250 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
251 {
252         struct ipath_swqe *wqe;
253         u32 next;
254         int i;
255         int j;
256         int acc;
257         int ret;
258         unsigned long flags;
259
260         spin_lock_irqsave(&qp->s_lock, flags);
261
262         /* Check that state is OK to post send. */
263         if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
264                 if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
265                         goto bail_inval;
266                 /* C10-96 says generate a flushed completion entry. */
267                 ipath_flush_wqe(qp, wr);
268                 ret = 0;
269                 goto bail;
270         }
271
272         /* IB spec says that num_sge == 0 is OK. */
273         if (wr->num_sge > qp->s_max_sge)
274                 goto bail_inval;
275
276         /*
277          * Don't allow RDMA reads or atomic operations on UC QPs, and
278          * reject undefined opcodes.
279          * Make sure the buffer is large enough to hold the result of atomics.
280          */
281         if (qp->ibqp.qp_type == IB_QPT_UC) {
282                 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
283                         goto bail_inval;
284         } else if (qp->ibqp.qp_type == IB_QPT_UD) {
285                 /* Check UD opcode */
286                 if (wr->opcode != IB_WR_SEND &&
287                     wr->opcode != IB_WR_SEND_WITH_IMM)
288                         goto bail_inval;
289                 /* Check UD destination address PD */
290                 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
291                         goto bail_inval;
292         } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
293                 goto bail_inval;
294         else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
295                    (wr->num_sge == 0 ||
296                     wr->sg_list[0].length < sizeof(u64) ||
297                     wr->sg_list[0].addr & (sizeof(u64) - 1)))
298                 goto bail_inval;
299         else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
300                 goto bail_inval;
301
302         next = qp->s_head + 1;
303         if (next >= qp->s_size)
304                 next = 0;
305         if (next == qp->s_last) {
306                 ret = -ENOMEM;
307                 goto bail;
308         }
309
310         wqe = get_swqe_ptr(qp, qp->s_head);
311         wqe->wr = *wr;
312         wqe->ssn = qp->s_ssn++;
313         wqe->length = 0;
314         if (wr->num_sge) {
315                 acc = wr->opcode >= IB_WR_RDMA_READ ?
316                         IB_ACCESS_LOCAL_WRITE : 0;
317                 for (i = 0, j = 0; i < wr->num_sge; i++) {
318                         u32 length = wr->sg_list[i].length;
319                         int ok;
320
321                         if (length == 0)
322                                 continue;
323                         ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
324                                            &wr->sg_list[i], acc);
325                         if (!ok)
326                                 goto bail_inval;
327                         wqe->length += length;
328                         j++;
329                 }
330                 wqe->wr.num_sge = j;
331         }
332         if (qp->ibqp.qp_type == IB_QPT_UC ||
333             qp->ibqp.qp_type == IB_QPT_RC) {
334                 if (wqe->length > 0x80000000U)
335                         goto bail_inval;
336         } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
337                 goto bail_inval;
338         qp->s_head = next;
339
340         ret = 0;
341         goto bail;
342
343 bail_inval:
344         ret = -EINVAL;
345 bail:
346         spin_unlock_irqrestore(&qp->s_lock, flags);
347         return ret;
348 }
349
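/*
 * Ring-full arithmetic for the check above (a sketch, same logic as the
 * code): the send queue is a circular buffer of s_size slots with s_head as
 * producer index and s_last as consumer index.  One slot is deliberately
 * left unused so that full and empty states are distinguishable:
 *
 *     next = (s_head + 1) % s_size;
 *     if (next == s_last)
 *             return -ENOMEM;         // full: at most s_size - 1 WQEs queued
 */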
350 /**
351  * ipath_post_send - post a send on a QP
352  * @ibqp: the QP to post the send on
353  * @wr: the list of work requests to post
354  * @bad_wr: the first bad WR is put here
355  *
356  * This may be called from interrupt context.
357  */
358 static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
359                            struct ib_send_wr **bad_wr)
360 {
361         struct ipath_qp *qp = to_iqp(ibqp);
362         int err = 0;
363
364         for (; wr; wr = wr->next) {
365                 err = ipath_post_one_send(qp, wr);
366                 if (err) {
367                         *bad_wr = wr;
368                         goto bail;
369                 }
370         }
371
372         /* Try to do the send work in the caller's context. */
373         ipath_do_send((unsigned long) qp);
374
375 bail:
376         return err;
377 }
378
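/*
 * Consumer-side sketch (illustrative; the WR contents are hypothetical) of
 * how this entry point is reached through the core verbs API:
 *
 *     struct ib_send_wr wr = { .opcode = IB_WR_SEND, .num_sge = 1,
 *                              .sg_list = &sge,
 *                              .send_flags = IB_SEND_SIGNALED };
 *     struct ib_send_wr *bad_wr;
 *
 *     if (ib_post_send(qp, &wr, &bad_wr))
 *             ;       // bad_wr now points at the first WR that failed
 *
 * WRs from *bad_wr onward were not queued; earlier ones were.
 */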
379 /**
380  * ipath_post_receive - post a receive on a QP
381  * @ibqp: the QP to post the receive on
382  * @wr: the WR to post
383  * @bad_wr: the first bad WR is put here
384  *
385  * This may be called from interrupt context.
386  */
387 static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
388                               struct ib_recv_wr **bad_wr)
389 {
390         struct ipath_qp *qp = to_iqp(ibqp);
391         struct ipath_rwq *wq = qp->r_rq.wq;
392         unsigned long flags;
393         int ret;
394
395         /* Check that state is OK to post receive. */
396         if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
397                 *bad_wr = wr;
398                 ret = -EINVAL;
399                 goto bail;
400         }
401
402         for (; wr; wr = wr->next) {
403                 struct ipath_rwqe *wqe;
404                 u32 next;
405                 int i;
406
407                 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
408                         *bad_wr = wr;
409                         ret = -EINVAL;
410                         goto bail;
411                 }
412
413                 spin_lock_irqsave(&qp->r_rq.lock, flags);
414                 next = wq->head + 1;
415                 if (next >= qp->r_rq.size)
416                         next = 0;
417                 if (next == wq->tail) {
418                         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
419                         *bad_wr = wr;
420                         ret = -ENOMEM;
421                         goto bail;
422                 }
423
424                 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
425                 wqe->wr_id = wr->wr_id;
426                 wqe->num_sge = wr->num_sge;
427                 for (i = 0; i < wr->num_sge; i++)
428                         wqe->sg_list[i] = wr->sg_list[i];
429                 /* Make sure queue entry is written before the head index. */
430                 smp_wmb();
431                 wq->head = next;
432                 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
433         }
434         ret = 0;
435
436 bail:
437         return ret;
438 }
439
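/*
 * Ordering note for the smp_wmb() above (a sketch of the reasoning): the
 * barrier makes the receive WQE contents visible before wq->head is
 * advanced, so a consumer that reads the head index and then the WQE (with
 * a matching read barrier) can never observe a partially written entry:
 *
 *     producer:  write wqe;  smp_wmb();  wq->head = next;
 *     consumer:  head = wq->head;  smp_rmb();  read entries up to head;
 */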
440 /**
441  * ipath_qp_rcv - process an incoming packet on a QP
442  * @dev: the device the packet came on
443  * @hdr: the packet header
444  * @has_grh: true if the packet has a GRH
445  * @data: the packet data
446  * @tlen: the packet length
447  * @qp: the QP the packet came on
448  *
449  * This is called from ipath_ib_rcv() to process an incoming packet
450  * for the given QP.
451  * Called at interrupt level.
452  */
453 static void ipath_qp_rcv(struct ipath_ibdev *dev,
454                          struct ipath_ib_header *hdr, int has_grh,
455                          void *data, u32 tlen, struct ipath_qp *qp)
456 {
457         /* Check for valid receive state. */
458         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
459                 dev->n_pkt_drops++;
460                 return;
461         }
462
463         switch (qp->ibqp.qp_type) {
464         case IB_QPT_SMI:
465         case IB_QPT_GSI:
466                 if (ib_ipath_disable_sma)
467                         break;
468                 /* FALLTHROUGH */
469         case IB_QPT_UD:
470                 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
471                 break;
472
473         case IB_QPT_RC:
474                 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
475                 break;
476
477         case IB_QPT_UC:
478                 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
479                 break;
480
481         default:
482                 break;
483         }
484 }
485
486 /**
487  * ipath_ib_rcv - process an incoming packet
488  * @dev: the device pointer
489  * @rhdr: the header of the packet
490  * @data: the packet data
491  * @tlen: the packet length
492  *
493  * This is called from ipath_kreceive() to process an incoming packet at
494  * interrupt level. Tlen is the length of the header + data + CRC in bytes.
495  */
496 void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
497                   u32 tlen)
498 {
499         struct ipath_ib_header *hdr = rhdr;
500         struct ipath_other_headers *ohdr;
501         struct ipath_qp *qp;
502         u32 qp_num;
503         int lnh;
504         u8 opcode;
505         u16 lid;
506
507         if (unlikely(dev == NULL))
508                 goto bail;
509
510         if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
511                 dev->rcv_errors++;
512                 goto bail;
513         }
514
515         /* Check for a valid destination LID (see ch. 7.11.1). */
516         lid = be16_to_cpu(hdr->lrh[1]);
517         if (lid < IPATH_MULTICAST_LID_BASE) {
518                 lid &= ~((1 << dev->dd->ipath_lmc) - 1);
519                 if (unlikely(lid != dev->dd->ipath_lid)) {
520                         dev->rcv_errors++;
521                         goto bail;
522                 }
523         }
524
525         /* Check for GRH */
526         lnh = be16_to_cpu(hdr->lrh[0]) & 3;
527         if (lnh == IPATH_LRH_BTH)
528                 ohdr = &hdr->u.oth;
529         else if (lnh == IPATH_LRH_GRH)
530                 ohdr = &hdr->u.l.oth;
531         else {
532                 dev->rcv_errors++;
533                 goto bail;
534         }
535
536         opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
537         dev->opstats[opcode].n_bytes += tlen;
538         dev->opstats[opcode].n_packets++;
539
540         /* Get the destination QP number. */
541         qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
542         if (qp_num == IPATH_MULTICAST_QPN) {
543                 struct ipath_mcast *mcast;
544                 struct ipath_mcast_qp *p;
545
546                 if (lnh != IPATH_LRH_GRH) {
547                         dev->n_pkt_drops++;
548                         goto bail;
549                 }
550                 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
551                 if (mcast == NULL) {
552                         dev->n_pkt_drops++;
553                         goto bail;
554                 }
555                 dev->n_multicast_rcv++;
556                 list_for_each_entry_rcu(p, &mcast->qp_list, list)
557                         ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
558                 /*
559                  * Notify ipath_multicast_detach() if it is waiting for us
560                  * to finish.
561                  */
562                 if (atomic_dec_return(&mcast->refcount) <= 1)
563                         wake_up(&mcast->wait);
564         } else {
565                 qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
566                 if (qp) {
567                         dev->n_unicast_rcv++;
568                         ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
569                                      tlen, qp);
570                         /*
571                          * Notify ipath_destroy_qp() if it is waiting
572                          * for us to finish.
573                          */
574                         if (atomic_dec_and_test(&qp->refcount))
575                                 wake_up(&qp->wait);
576                 } else
577                         dev->n_pkt_drops++;
578         }
579
580 bail:;
581 }
582
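/*
 * Receive dispatch summary (descriptive note): the low two bits of LRH word
 * 0 select the header layout (IPATH_LRH_BTH means no GRH, IPATH_LRH_GRH
 * means a GRH is present), bits 31:24 of BTH word 0 give the opcode used
 * for the per-opcode statistics, and BTH word 1 holds the destination QPN.
 * Multicast packets (QPN == IPATH_MULTICAST_QPN) are replicated to every QP
 * attached to the GID's multicast group; unicast packets are delivered to
 * the single QP found in the QP table.
 */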
583 /**
584  * ipath_ib_timer - verbs timer
585  * @dev: the device pointer
586  *
587  * This is called from ipath_do_rcv_timer() at interrupt level to check for
588  * QPs which need retransmits and to collect performance numbers.
589  */
590 static void ipath_ib_timer(struct ipath_ibdev *dev)
591 {
592         struct ipath_qp *resend = NULL;
593         struct list_head *last;
594         struct ipath_qp *qp;
595         unsigned long flags;
596
597         if (dev == NULL)
598                 return;
599
600         spin_lock_irqsave(&dev->pending_lock, flags);
601         /* Start filling the next pending queue. */
602         if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
603                 dev->pending_index = 0;
604         /* Save any requests still in the new queue; they have timed out. */
605         last = &dev->pending[dev->pending_index];
606         while (!list_empty(last)) {
607                 qp = list_entry(last->next, struct ipath_qp, timerwait);
608                 list_del_init(&qp->timerwait);
609                 qp->timer_next = resend;
610                 resend = qp;
611                 atomic_inc(&qp->refcount);
612         }
613         last = &dev->rnrwait;
614         if (!list_empty(last)) {
615                 qp = list_entry(last->next, struct ipath_qp, timerwait);
616                 if (--qp->s_rnr_timeout == 0) {
617                         do {
618                                 list_del_init(&qp->timerwait);
619                                 tasklet_hi_schedule(&qp->s_task);
620                                 if (list_empty(last))
621                                         break;
622                                 qp = list_entry(last->next, struct ipath_qp,
623                                                 timerwait);
624                         } while (qp->s_rnr_timeout == 0);
625                 }
626         }
627         /*
628          * We should only be in the started state if pma_sample_start != 0
629          */
630         if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
631             --dev->pma_sample_start == 0) {
632                 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
633                 ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
634                                         &dev->ipath_rword,
635                                         &dev->ipath_spkts,
636                                         &dev->ipath_rpkts,
637                                         &dev->ipath_xmit_wait);
638         }
639         if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
640                 if (dev->pma_sample_interval == 0) {
641                         u64 ta, tb, tc, td, te;
642
643                         dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
644                         ipath_snapshot_counters(dev->dd, &ta, &tb,
645                                                 &tc, &td, &te);
646
647                         dev->ipath_sword = ta - dev->ipath_sword;
648                         dev->ipath_rword = tb - dev->ipath_rword;
649                         dev->ipath_spkts = tc - dev->ipath_spkts;
650                         dev->ipath_rpkts = td - dev->ipath_rpkts;
651                         dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
652                 }
653                 else
654                         dev->pma_sample_interval--;
655         }
656         spin_unlock_irqrestore(&dev->pending_lock, flags);
657
658         /* XXX What if timer fires again while this is running? */
659         for (qp = resend; qp != NULL; qp = qp->timer_next) {
660                 struct ib_wc wc;
661
662                 spin_lock_irqsave(&qp->s_lock, flags);
663                 if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
664                         dev->n_timeouts++;
665                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
666                 }
667                 spin_unlock_irqrestore(&qp->s_lock, flags);
668
669                 /* Notify ipath_destroy_qp() if it is waiting. */
670                 if (atomic_dec_and_test(&qp->refcount))
671                         wake_up(&qp->wait);
672         }
673 }
674
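/*
 * Aging scheme used above (descriptive note): dev->pending[] acts as a
 * coarse timeout wheel.  Each timer tick advances pending_index, so any QP
 * still queued on the slot that is about to be reused has waited at least a
 * full cycle; it is moved onto a local resend chain and, once the
 * pending_lock is dropped, RC QPs on that chain with unacknowledged work
 * are restarted via ipath_restart_rc().
 */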
675 static void update_sge(struct ipath_sge_state *ss, u32 length)
676 {
677         struct ipath_sge *sge = &ss->sge;
678
679         sge->vaddr += length;
680         sge->length -= length;
681         sge->sge_length -= length;
682         if (sge->sge_length == 0) {
683                 if (--ss->num_sge)
684                         *sge = *ss->sg_list++;
685         } else if (sge->length == 0 && sge->mr != NULL) {
686                 if (++sge->n >= IPATH_SEGSZ) {
687                         if (++sge->m >= sge->mr->mapsz)
688                                 return;
689                         sge->n = 0;
690                 }
691                 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
692                 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
693         }
694 }
695
696 #ifdef __LITTLE_ENDIAN
697 static inline u32 get_upper_bits(u32 data, u32 shift)
698 {
699         return data >> shift;
700 }
701
702 static inline u32 set_upper_bits(u32 data, u32 shift)
703 {
704         return data << shift;
705 }
706
707 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
708 {
709         data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
710         data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
711         return data;
712 }
713 #else
714 static inline u32 get_upper_bits(u32 data, u32 shift)
715 {
716         return data << shift;
717 }
718
719 static inline u32 set_upper_bits(u32 data, u32 shift)
720 {
721         return data >> shift;
722 }
723
724 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
725 {
726         data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
727         data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
728         return data;
729 }
730 #endif
731
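/*
 * Worked example for the byte-packing helpers above (little-endian case,
 * values are illustrative): with a misalignment of off = 1 and the aligned
 * source word 0xDDCCBBAA, get_upper_bits(0xDDCCBBAA, 8) yields 0x00DDCCBB,
 * i.e. the three bytes that actually belong to the SGE, and
 * clear_upper_bytes(v, n, off) keeps exactly n bytes of v positioned at
 * byte offset off so that partial words can be accumulated in 'data' before
 * being written out with __raw_writel().
 */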
732 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
733                     u32 length, unsigned flush_wc)
734 {
735         u32 extra = 0;
736         u32 data = 0;
737         u32 last;
738
739         while (1) {
740                 u32 len = ss->sge.length;
741                 u32 off;
742
743                 if (len > length)
744                         len = length;
745                 if (len > ss->sge.sge_length)
746                         len = ss->sge.sge_length;
747                 BUG_ON(len == 0);
748                 /* If the source address is not aligned, try to align it. */
749                 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
750                 if (off) {
751                         u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
752                                             ~(sizeof(u32) - 1));
753                         u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
754                         u32 y;
755
756                         y = sizeof(u32) - off;
757                         if (len > y)
758                                 len = y;
759                         if (len + extra >= sizeof(u32)) {
760                                 data |= set_upper_bits(v, extra *
761                                                        BITS_PER_BYTE);
762                                 len = sizeof(u32) - extra;
763                                 if (len == length) {
764                                         last = data;
765                                         break;
766                                 }
767                                 __raw_writel(data, piobuf);
768                                 piobuf++;
769                                 extra = 0;
770                                 data = 0;
771                         } else {
772                                 /* Clear unused upper bytes */
773                                 data |= clear_upper_bytes(v, len, extra);
774                                 if (len == length) {
775                                         last = data;
776                                         break;
777                                 }
778                                 extra += len;
779                         }
780                 } else if (extra) {
781                         /* Source address is aligned. */
782                         u32 *addr = (u32 *) ss->sge.vaddr;
783                         int shift = extra * BITS_PER_BYTE;
784                         int ushift = 32 - shift;
785                         u32 l = len;
786
787                         while (l >= sizeof(u32)) {
788                                 u32 v = *addr;
789
790                                 data |= set_upper_bits(v, shift);
791                                 __raw_writel(data, piobuf);
792                                 data = get_upper_bits(v, ushift);
793                                 piobuf++;
794                                 addr++;
795                                 l -= sizeof(u32);
796                         }
797                         /*
798                          * We still have 'extra' number of bytes leftover.
799                          */
800                         if (l) {
801                                 u32 v = *addr;
802
803                                 if (l + extra >= sizeof(u32)) {
804                                         data |= set_upper_bits(v, shift);
805                                         len -= l + extra - sizeof(u32);
806                                         if (len == length) {
807                                                 last = data;
808                                                 break;
809                                         }
810                                         __raw_writel(data, piobuf);
811                                         piobuf++;
812                                         extra = 0;
813                                         data = 0;
814                                 } else {
815                                         /* Clear unused upper bytes */
816                                         data |= clear_upper_bytes(v, l,
817                                                                   extra);
818                                         if (len == length) {
819                                                 last = data;
820                                                 break;
821                                         }
822                                         extra += l;
823                                 }
824                         } else if (len == length) {
825                                 last = data;
826                                 break;
827                         }
828                 } else if (len == length) {
829                         u32 w;
830
831                         /*
832                          * Need to round up for the last dword in the
833                          * packet.
834                          */
835                         w = (len + 3) >> 2;
836                         __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
837                         piobuf += w - 1;
838                         last = ((u32 *) ss->sge.vaddr)[w - 1];
839                         break;
840                 } else {
841                         u32 w = len >> 2;
842
843                         __iowrite32_copy(piobuf, ss->sge.vaddr, w);
844                         piobuf += w;
845
846                         extra = len & (sizeof(u32) - 1);
847                         if (extra) {
848                                 u32 v = ((u32 *) ss->sge.vaddr)[w];
849
850                                 /* Clear unused upper bytes */
851                                 data = clear_upper_bytes(v, extra, 0);
852                         }
853                 }
854                 update_sge(ss, len);
855                 length -= len;
856         }
857         /* Update address before sending packet. */
858         update_sge(ss, length);
859         if (flush_wc) {
860                 /* must flush early everything before trigger word */
861                 ipath_flush_wc();
862                 __raw_writel(last, piobuf);
863                 /* be sure trigger word is written */
864                 ipath_flush_wc();
865         } else
866                 __raw_writel(last, piobuf);
867 }
868
869 static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
870                                 struct ipath_sge_state *ss, u32 len,
871                                 u32 plen, u32 dwords)
872 {
873         struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
874         u32 __iomem *piobuf;
875         unsigned flush_wc;
876         int ret;
877
878         piobuf = ipath_getpiobuf(dd, NULL);
879         if (unlikely(piobuf == NULL)) {
880                 ret = -EBUSY;
881                 goto bail;
882         }
883
884         /*
885          * Write plen to the control qword, no flags.
886          * We have to flush after the PBC for correctness on some CPUs,
887          * or the WC buffer can be written out of order.
888          */
889         writeq(plen, piobuf);
890         piobuf += 2;
891
892         flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
893         if (len == 0) {
894                 /*
895                  * If there is just the header portion, must flush before
896                  * writing last word of header for correctness, and after
897                  * the last header word (trigger word).
898                  */
899                 if (flush_wc) {
900                         ipath_flush_wc();
901                         __iowrite32_copy(piobuf, hdr, hdrwords - 1);
902                         ipath_flush_wc();
903                         __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
904                         ipath_flush_wc();
905                 } else
906                         __iowrite32_copy(piobuf, hdr, hdrwords);
907                 goto done;
908         }
909
910         if (flush_wc)
911                 ipath_flush_wc();
912         __iowrite32_copy(piobuf, hdr, hdrwords);
913         piobuf += hdrwords;
914
915         /* The common case is aligned and contained in one segment. */
916         if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
917                    !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
918                 u32 *addr = (u32 *) ss->sge.vaddr;
919
920                 /* Update address before sending packet. */
921                 update_sge(ss, len);
922                 if (flush_wc) {
923                         __iowrite32_copy(piobuf, addr, dwords - 1);
924                         /* must flush early everything before trigger word */
925                         ipath_flush_wc();
926                         __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
927                         /* be sure trigger word is written */
928                         ipath_flush_wc();
929                 } else
930                         __iowrite32_copy(piobuf, addr, dwords);
931                 goto done;
932         }
933         copy_io(piobuf, ss, len, flush_wc);
934 done:
935         if (qp->s_wqe)
936                 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
937         ret = 0;
938 bail:
939         return ret;
940 }
941
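/*
 * Write-combining discipline used above (descriptive note): on chips that
 * set IPATH_PIO_FLUSH_WC, the PIO buffer is filled as PBC word, header and
 * payload except for the final word, then flushed, then the final "trigger"
 * word is written and flushed again.  The hardware starts transmitting once
 * the trigger word arrives, so nothing may be written after it and nothing
 * written before it may still be sitting in a WC store buffer.
 */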
942 /**
943  * ipath_verbs_send - send a packet
944  * @qp: the QP to send on
945  * @hdr: the packet header
946  * @hdrwords: the number of words in the header
947  * @ss: the SGE to send
948  * @len: the length of the packet in bytes
949  */
950 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
951                      u32 hdrwords, struct ipath_sge_state *ss, u32 len)
952 {
953         struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
954         u32 plen;
955         int ret;
956         u32 dwords = (len + 3) >> 2;
957
958         /* +1 is for the qword padding of pbc */
959         plen = hdrwords + dwords + 1;
960
961         /* Drop non-VL15 packets if we are not in the active state */
962         if (!(dd->ipath_flags & IPATH_LINKACTIVE) &&
963             qp->ibqp.qp_type != IB_QPT_SMI) {
964                 if (qp->s_wqe)
965                         ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
966                 ret = 0;
967         } else
968                 ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords,
969                                            ss, len, plen, dwords);
970
971         return ret;
972 }
973
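/*
 * Worked example of the length arithmetic above (values are hypothetical):
 * a 56-byte header gives hdrwords = 14, a 256-byte payload gives
 * dwords = (256 + 3) >> 2 = 64, and plen = 14 + 64 + 1 = 79, where the
 * extra word accounts for the qword-aligned PBC written at the start of
 * the PIO buffer.
 */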
974 int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
975                             u64 *rwords, u64 *spkts, u64 *rpkts,
976                             u64 *xmit_wait)
977 {
978         int ret;
979
980         if (!(dd->ipath_flags & IPATH_INITTED)) {
981                 /* no hardware, freeze, etc. */
982                 ret = -EINVAL;
983                 goto bail;
984         }
985         *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
986         *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
987         *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
988         *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
989         *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
990
991         ret = 0;
992
993 bail:
994         return ret;
995 }
996
997 /**
998  * ipath_get_counters - get various chip counters
999  * @dd: the infinipath device
1000  * @cntrs: counters are placed here
1001  *
1002  * Return the counters needed by recv_pma_get_portcounters().
1003  */
1004 int ipath_get_counters(struct ipath_devdata *dd,
1005                        struct ipath_verbs_counters *cntrs)
1006 {
1007         struct ipath_cregs const *crp = dd->ipath_cregs;
1008         int ret;
1009
1010         if (!(dd->ipath_flags & IPATH_INITTED)) {
1011                 /* no hardware, freeze, etc. */
1012                 ret = -EINVAL;
1013                 goto bail;
1014         }
1015         cntrs->symbol_error_counter =
1016                 ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
1017         cntrs->link_error_recovery_counter =
1018                 ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
1019         /*
1020          * The link downed counter counts when the other side downs the
1021          * connection.  We add in the number of times we downed the link
1022          * due to local link integrity errors to compensate.
1023          */
1024         cntrs->link_downed_counter =
1025                 ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
1026         cntrs->port_rcv_errors =
1027                 ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
1028                 ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
1029                 ipath_snap_cntr(dd, crp->cr_portovflcnt) +
1030                 ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
1031                 ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
1032                 ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
1033                 ipath_snap_cntr(dd, crp->cr_erricrccnt) +
1034                 ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
1035                 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
1036                 ipath_snap_cntr(dd, crp->cr_badformatcnt) +
1037                 dd->ipath_rxfc_unsupvl_errs;
1038         cntrs->port_rcv_remphys_errors =
1039                 ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
1040         cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
1041         cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
1042         cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
1043         cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
1044         cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
1045         cntrs->local_link_integrity_errors =
1046                 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1047                 dd->ipath_lli_errs : dd->ipath_lli_errors;
1048         cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs;
1049
1050         ret = 0;
1051
1052 bail:
1053         return ret;
1054 }
1055
1056 /**
1057  * ipath_ib_piobufavail - callback when a PIO buffer is available
1058  * @dev: the device pointer
1059  *
1060  * This is called from ipath_intr() at interrupt level when a PIO buffer is
1061  * available after ipath_verbs_send() returned an error that no buffers were
1062  * available.  Return 1 if we consumed all the PIO buffers and we still have
1063  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
1064  * return zero).
1065  */
1066 int ipath_ib_piobufavail(struct ipath_ibdev *dev)
1067 {
1068         struct ipath_qp *qp;
1069         unsigned long flags;
1070
1071         if (dev == NULL)
1072                 goto bail;
1073
1074         spin_lock_irqsave(&dev->pending_lock, flags);
1075         while (!list_empty(&dev->piowait)) {
1076                 qp = list_entry(dev->piowait.next, struct ipath_qp,
1077                                 piowait);
1078                 list_del_init(&qp->piowait);
1079                 clear_bit(IPATH_S_BUSY, &qp->s_busy);
1080                 tasklet_hi_schedule(&qp->s_task);
1081         }
1082         spin_unlock_irqrestore(&dev->pending_lock, flags);
1083
1084 bail:
1085         return 0;
1086 }
1087
1088 static int ipath_query_device(struct ib_device *ibdev,
1089                               struct ib_device_attr *props)
1090 {
1091         struct ipath_ibdev *dev = to_idev(ibdev);
1092
1093         memset(props, 0, sizeof(*props));
1094
1095         props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1096                 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1097                 IB_DEVICE_SYS_IMAGE_GUID;
1098         props->page_size_cap = PAGE_SIZE;
1099         props->vendor_id = dev->dd->ipath_vendorid;
1100         props->vendor_part_id = dev->dd->ipath_deviceid;
1101         props->hw_ver = dev->dd->ipath_pcirev;
1102
1103         props->sys_image_guid = dev->sys_image_guid;
1104
1105         props->max_mr_size = ~0ull;
1106         props->max_qp = ib_ipath_max_qps;
1107         props->max_qp_wr = ib_ipath_max_qp_wrs;
1108         props->max_sge = ib_ipath_max_sges;
1109         props->max_cq = ib_ipath_max_cqs;
1110         props->max_ah = ib_ipath_max_ahs;
1111         props->max_cqe = ib_ipath_max_cqes;
1112         props->max_mr = dev->lk_table.max;
1113         props->max_fmr = dev->lk_table.max;
1114         props->max_map_per_fmr = 32767;
1115         props->max_pd = ib_ipath_max_pds;
1116         props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
1117         props->max_qp_init_rd_atom = 255;
1118         /* props->max_res_rd_atom */
1119         props->max_srq = ib_ipath_max_srqs;
1120         props->max_srq_wr = ib_ipath_max_srq_wrs;
1121         props->max_srq_sge = ib_ipath_max_srq_sges;
1122         /* props->local_ca_ack_delay */
1123         props->atomic_cap = IB_ATOMIC_GLOB;
1124         props->max_pkeys = ipath_get_npkeys(dev->dd);
1125         props->max_mcast_grp = ib_ipath_max_mcast_grps;
1126         props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
1127         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1128                 props->max_mcast_grp;
1129
1130         return 0;
1131 }
1132
1133 const u8 ipath_cvt_physportstate[16] = {
1134         [INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
1135         [INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
1136         [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
1137         [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
1138         [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
1139         [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
1140         [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
1141         [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
1142         [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
1143         [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
1144         [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
1145         [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
1146         [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
1147 };
1148
1149 u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
1150 {
1151         return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
1152 }
1153
1154 static int ipath_query_port(struct ib_device *ibdev,
1155                             u8 port, struct ib_port_attr *props)
1156 {
1157         struct ipath_ibdev *dev = to_idev(ibdev);
1158         struct ipath_devdata *dd = dev->dd;
1159         enum ib_mtu mtu;
1160         u16 lid = dd->ipath_lid;
1161         u64 ibcstat;
1162
1163         memset(props, 0, sizeof(*props));
1164         props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
1165         props->lmc = dd->ipath_lmc;
1166         props->sm_lid = dev->sm_lid;
1167         props->sm_sl = dev->sm_sl;
1168         ibcstat = dd->ipath_lastibcstat;
1169         props->state = ((ibcstat >> 4) & 0x3) + 1;
1170         /* See phys_state_show() */
1171         props->phys_state = ipath_cvt_physportstate[
1172                 dd->ipath_lastibcstat & 0xf];
1173         props->port_cap_flags = dev->port_cap_flags;
1174         props->gid_tbl_len = 1;
1175         props->max_msg_sz = 0x80000000;
1176         props->pkey_tbl_len = ipath_get_npkeys(dd);
1177         props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
1178                 dev->z_pkey_violations;
1179         props->qkey_viol_cntr = dev->qkey_violations;
1180         props->active_width = IB_WIDTH_4X;
1181         /* See rate_show() */
1182         props->active_speed = 1;        /* Regular 10Gbs speed. */
1183         props->max_vl_num = 1;          /* VLCap = VL0 */
1184         props->init_type_reply = 0;
1185
1186         /*
1187          * Note: the chip supports a maximum MTU of 4096, but the driver
1188          * hasn't implemented this feature yet, so set the maximum value
1189          * to 2048.
1190          */
1191         props->max_mtu = IB_MTU_2048;
1192         switch (dd->ipath_ibmtu) {
1193         case 4096:
1194                 mtu = IB_MTU_4096;
1195                 break;
1196         case 2048:
1197                 mtu = IB_MTU_2048;
1198                 break;
1199         case 1024:
1200                 mtu = IB_MTU_1024;
1201                 break;
1202         case 512:
1203                 mtu = IB_MTU_512;
1204                 break;
1205         case 256:
1206                 mtu = IB_MTU_256;
1207                 break;
1208         default:
1209                 mtu = IB_MTU_2048;
1210         }
1211         props->active_mtu = mtu;
1212         props->subnet_timeout = dev->subnet_timeout;
1213
1214         return 0;
1215 }
1216
1217 static int ipath_modify_device(struct ib_device *device,
1218                                int device_modify_mask,
1219                                struct ib_device_modify *device_modify)
1220 {
1221         int ret;
1222
1223         if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1224                                    IB_DEVICE_MODIFY_NODE_DESC)) {
1225                 ret = -EOPNOTSUPP;
1226                 goto bail;
1227         }
1228
1229         if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
1230                 memcpy(device->node_desc, device_modify->node_desc, 64);
1231
1232         if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
1233                 to_idev(device)->sys_image_guid =
1234                         cpu_to_be64(device_modify->sys_image_guid);
1235
1236         ret = 0;
1237
1238 bail:
1239         return ret;
1240 }
1241
1242 static int ipath_modify_port(struct ib_device *ibdev,
1243                              u8 port, int port_modify_mask,
1244                              struct ib_port_modify *props)
1245 {
1246         struct ipath_ibdev *dev = to_idev(ibdev);
1247
1248         dev->port_cap_flags |= props->set_port_cap_mask;
1249         dev->port_cap_flags &= ~props->clr_port_cap_mask;
1250         if (port_modify_mask & IB_PORT_SHUTDOWN)
1251                 ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
1252         if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1253                 dev->qkey_violations = 0;
1254         return 0;
1255 }
1256
1257 static int ipath_query_gid(struct ib_device *ibdev, u8 port,
1258                            int index, union ib_gid *gid)
1259 {
1260         struct ipath_ibdev *dev = to_idev(ibdev);
1261         int ret;
1262
1263         if (index >= 1) {
1264                 ret = -EINVAL;
1265                 goto bail;
1266         }
1267         gid->global.subnet_prefix = dev->gid_prefix;
1268         gid->global.interface_id = dev->dd->ipath_guid;
1269
1270         ret = 0;
1271
1272 bail:
1273         return ret;
1274 }
1275
1276 static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
1277                                     struct ib_ucontext *context,
1278                                     struct ib_udata *udata)
1279 {
1280         struct ipath_ibdev *dev = to_idev(ibdev);
1281         struct ipath_pd *pd;
1282         struct ib_pd *ret;
1283
1284         /*
1285          * This is actually totally arbitrary.  Some correctness tests
1286          * assume there's a maximum number of PDs that can be allocated.
1287          * We don't actually have this limit, but we fail the test if
1288          * we allow allocations of more than we report for this value.
1289          */
1290
1291         pd = kmalloc(sizeof *pd, GFP_KERNEL);
1292         if (!pd) {
1293                 ret = ERR_PTR(-ENOMEM);
1294                 goto bail;
1295         }
1296
1297         spin_lock(&dev->n_pds_lock);
1298         if (dev->n_pds_allocated == ib_ipath_max_pds) {
1299                 spin_unlock(&dev->n_pds_lock);
1300                 kfree(pd);
1301                 ret = ERR_PTR(-ENOMEM);
1302                 goto bail;
1303         }
1304
1305         dev->n_pds_allocated++;
1306         spin_unlock(&dev->n_pds_lock);
1307
1308         /* ib_alloc_pd() will initialize pd->ibpd. */
1309         pd->user = udata != NULL;
1310
1311         ret = &pd->ibpd;
1312
1313 bail:
1314         return ret;
1315 }
1316
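/*
 * Resource-limit pattern (descriptive note): allocation paths in this file
 * keep an in-use count under a spinlock and compare it against the matching
 * ib_ipath_max_* module parameter, as with n_pds_allocated versus max_pds
 * here and n_ahs_allocated versus max_ahs in ipath_create_ah() below; once
 * the configured ceiling is reached the allocation fails with -ENOMEM.
 */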
1317 static int ipath_dealloc_pd(struct ib_pd *ibpd)
1318 {
1319         struct ipath_pd *pd = to_ipd(ibpd);
1320         struct ipath_ibdev *dev = to_idev(ibpd->device);
1321
1322         spin_lock(&dev->n_pds_lock);
1323         dev->n_pds_allocated--;
1324         spin_unlock(&dev->n_pds_lock);
1325
1326         kfree(pd);
1327
1328         return 0;
1329 }
1330
1331 /**
1332  * ipath_create_ah - create an address handle
1333  * @pd: the protection domain
1334  * @ah_attr: the attributes of the AH
1335  *
1336  * This may be called from interrupt context.
1337  */
1338 static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
1339                                      struct ib_ah_attr *ah_attr)
1340 {
1341         struct ipath_ah *ah;
1342         struct ib_ah *ret;
1343         struct ipath_ibdev *dev = to_idev(pd->device);
1344         unsigned long flags;
1345
1346         /* A multicast address requires a GRH (see ch. 8.4.1). */
1347         if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
1348             ah_attr->dlid != IPATH_PERMISSIVE_LID &&
1349             !(ah_attr->ah_flags & IB_AH_GRH)) {
1350                 ret = ERR_PTR(-EINVAL);
1351                 goto bail;
1352         }
1353
1354         if (ah_attr->dlid == 0) {
1355                 ret = ERR_PTR(-EINVAL);
1356                 goto bail;
1357         }
1358
1359         if (ah_attr->port_num < 1 ||
1360             ah_attr->port_num > pd->device->phys_port_cnt) {
1361                 ret = ERR_PTR(-EINVAL);
1362                 goto bail;
1363         }
1364
1365         ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1366         if (!ah) {
1367                 ret = ERR_PTR(-ENOMEM);
1368                 goto bail;
1369         }
1370
1371         spin_lock_irqsave(&dev->n_ahs_lock, flags);
1372         if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
1373                 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1374                 kfree(ah);
1375                 ret = ERR_PTR(-ENOMEM);
1376                 goto bail;
1377         }
1378
1379         dev->n_ahs_allocated++;
1380         spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1381
1382         /* ib_create_ah() will initialize ah->ibah. */
1383         ah->attr = *ah_attr;
1384
1385         ret = &ah->ibah;
1386
1387 bail:
1388         return ret;
1389 }
1390
1391 /**
1392  * ipath_destroy_ah - destroy an address handle
1393  * @ibah: the AH to destroy
1394  *
1395  * This may be called from interrupt context.
1396  */
1397 static int ipath_destroy_ah(struct ib_ah *ibah)
1398 {
1399         struct ipath_ibdev *dev = to_idev(ibah->device);
1400         struct ipath_ah *ah = to_iah(ibah);
1401         unsigned long flags;
1402
1403         spin_lock_irqsave(&dev->n_ahs_lock, flags);
1404         dev->n_ahs_allocated--;
1405         spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1406
1407         kfree(ah);
1408
1409         return 0;
1410 }
1411
1412 static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1413 {
1414         struct ipath_ah *ah = to_iah(ibah);
1415
1416         *ah_attr = ah->attr;
1417
1418         return 0;
1419 }
1420
1421 /**
1422  * ipath_get_npkeys - return the size of the PKEY table for port 0
1423  * @dd: the infinipath device
1424  */
1425 unsigned ipath_get_npkeys(struct ipath_devdata *dd)
1426 {
1427         return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1428 }
1429
1430 /**
1431  * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
1432  * @dd: the infinipath device
1433  * @index: the PKEY index
1434  */
1435 unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
1436 {
1437         unsigned ret;
1438
1439         if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1440                 ret = 0;
1441         else
1442                 ret = dd->ipath_pd[0]->port_pkeys[index];
1443
1444         return ret;
1445 }
1446
1447 static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1448                             u16 *pkey)
1449 {
1450         struct ipath_ibdev *dev = to_idev(ibdev);
1451         int ret;
1452
1453         if (index >= ipath_get_npkeys(dev->dd)) {
1454                 ret = -EINVAL;
1455                 goto bail;
1456         }
1457
1458         *pkey = ipath_get_pkey(dev->dd, index);
1459         ret = 0;
1460
1461 bail:
1462         return ret;
1463 }
1464
1465 /**
1466  * ipath_alloc_ucontext - allocate a ucontext
1467  * @ibdev: the infiniband device
1468  * @udata: not used by the InfiniPath driver
1469  */
1470
1471 static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
1472                                                 struct ib_udata *udata)
1473 {
1474         struct ipath_ucontext *context;
1475         struct ib_ucontext *ret;
1476
1477         context = kmalloc(sizeof *context, GFP_KERNEL);
1478         if (!context) {
1479                 ret = ERR_PTR(-ENOMEM);
1480                 goto bail;
1481         }
1482
1483         ret = &context->ibucontext;
1484
1485 bail:
1486         return ret;
1487 }
1488
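/**
 * ipath_dealloc_ucontext - free a ucontext allocated by ipath_alloc_ucontext()
 * @context: the ucontext to free
 */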
1489 static int ipath_dealloc_ucontext(struct ib_ucontext *context)
1490 {
1491         kfree(to_iucontext(context));
1492         return 0;
1493 }
1494
1495 static int ipath_verbs_register_sysfs(struct ib_device *dev);
1496
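/**
 * __verbs_timer - periodic timer for verbs layer processing
 * @arg: the struct ipath_devdata, cast to an unsigned long
 *
 * Calls ipath_ib_timer() and rearms itself to run again one jiffy later.
 */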
1497 static void __verbs_timer(unsigned long arg)
1498 {
1499         struct ipath_devdata *dd = (struct ipath_devdata *) arg;
1500
1501         /* Handle verbs layer timeouts. */
1502         ipath_ib_timer(dd->verbs_dev);
1503
1504         mod_timer(&dd->verbs_timer, jiffies + 1);
1505 }
1506
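/**
 * enable_timer - start the periodic verbs timer
 * @dd: the infinipath device
 *
 * On chips with per-packet receive interrupts (IPATH_GPIO_INTR), also
 * enables the GPIO bit 2 interrupt.  Always returns 0.
 */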
1507 static int enable_timer(struct ipath_devdata *dd)
1508 {
1509         /*
1510          * Early chips had a design flaw where the chip and kernel idea
1511          * of the tail register don't always agree, and therefore we won't
1512          * get an interrupt on the next packet received.
1513          * If the board supports per-packet receive interrupts, use them.
1514          * Otherwise, the timer function periodically checks for packets
1515          * to cover this case.
1516          * Either way, the timer is needed for verbs layer related
1517          * processing.
1518          */
1519         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1520                 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1521                                  0x2074076542310ULL);
1522                 /* Enable GPIO bit 2 interrupt */
1523                 dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
1524                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1525                                  dd->ipath_gpio_mask);
1526         }
1527
1528         init_timer(&dd->verbs_timer);
1529         dd->verbs_timer.function = __verbs_timer;
1530         dd->verbs_timer.data = (unsigned long)dd;
1531         dd->verbs_timer.expires = jiffies + 1;
1532         add_timer(&dd->verbs_timer);
1533
1534         return 0;
1535 }
1536
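/**
 * disable_timer - stop the periodic verbs timer
 * @dd: the infinipath device
 *
 * Disables the GPIO bit 2 interrupt if enable_timer() enabled it, then
 * stops the timer and waits for a running handler to finish.
 * Always returns 0.
 */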
1537 static int disable_timer(struct ipath_devdata *dd)
1538 {
1540         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1541                 /* Disable GPIO bit 2 interrupt */
1542                 dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
1543                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1544                                  dd->ipath_gpio_mask);
1545                 /*
1546                  * We might want to undo changes to debugportselect,
1547                  * but how?
1548                  */
1549         }
1550
1551         del_timer_sync(&dd->verbs_timer);
1552
1553         return 0;
1554 }
1555
1556 /**
1557  * ipath_register_ib_device - register our device with the infiniband core
1558  * @dd: the device data structure
1559  * Returns 0 on success or a negative errno on failure; on success the
 * allocated ipath_ibdev is stored in dd->verbs_dev.
1560  */
1561 int ipath_register_ib_device(struct ipath_devdata *dd)
1562 {
1563         struct ipath_verbs_counters cntrs;
1564         struct ipath_ibdev *idev;
1565         struct ib_device *dev;
1566         int ret;
1567
1568         idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
1569         if (idev == NULL) {
1570                 ret = -ENOMEM;
1571                 goto bail;
1572         }
1573
1574         dev = &idev->ibdev;
1575
1576         /* Only need to initialize non-zero fields. */
1577         spin_lock_init(&idev->n_pds_lock);
1578         spin_lock_init(&idev->n_ahs_lock);
1579         spin_lock_init(&idev->n_cqs_lock);
1580         spin_lock_init(&idev->n_qps_lock);
1581         spin_lock_init(&idev->n_srqs_lock);
1582         spin_lock_init(&idev->n_mcast_grps_lock);
1583
1584         spin_lock_init(&idev->qp_table.lock);
1585         spin_lock_init(&idev->lk_table.lock);
1586         idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
1587         /* Set the prefix to the default value (see ch. 4.1.1) */
1588         idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
1589
1590         ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
1591         if (ret)
1592                 goto err_qp;
1593
1594         /*
1595          * The top ib_ipath_lkey_table_size bits are used to index the
1596          * table.  The lower 8 bits can be owned by the user (copied from
1597          * the LKEY).  The remaining bits act as a generation number or tag.
1598          */
1599         idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
1600         idev->lk_table.table = kzalloc(idev->lk_table.max *
1601                                        sizeof(*idev->lk_table.table),
1602                                        GFP_KERNEL);
1603         if (idev->lk_table.table == NULL) {
1604                 ret = -ENOMEM;
1605                 goto err_lk;
1606         }
1607         INIT_LIST_HEAD(&idev->pending_mmaps);
1608         spin_lock_init(&idev->pending_lock);
1609         idev->mmap_offset = PAGE_SIZE;
1610         spin_lock_init(&idev->mmap_offset_lock);
1611         INIT_LIST_HEAD(&idev->pending[0]);
1612         INIT_LIST_HEAD(&idev->pending[1]);
1613         INIT_LIST_HEAD(&idev->pending[2]);
1614         INIT_LIST_HEAD(&idev->piowait);
1615         INIT_LIST_HEAD(&idev->rnrwait);
1616         idev->pending_index = 0;
1617         idev->port_cap_flags =
1618                 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
1619         idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1620         idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1621         idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1622         idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1623         idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1624         idev->link_width_enabled = 3;   /* 1x or 4x */
1625
1626         /* Snapshot current HW counters to "clear" them. */
1627         ipath_get_counters(dd, &cntrs);
1628         idev->z_symbol_error_counter = cntrs.symbol_error_counter;
1629         idev->z_link_error_recovery_counter =
1630                 cntrs.link_error_recovery_counter;
1631         idev->z_link_downed_counter = cntrs.link_downed_counter;
1632         idev->z_port_rcv_errors = cntrs.port_rcv_errors;
1633         idev->z_port_rcv_remphys_errors =
1634                 cntrs.port_rcv_remphys_errors;
1635         idev->z_port_xmit_discards = cntrs.port_xmit_discards;
1636         idev->z_port_xmit_data = cntrs.port_xmit_data;
1637         idev->z_port_rcv_data = cntrs.port_rcv_data;
1638         idev->z_port_xmit_packets = cntrs.port_xmit_packets;
1639         idev->z_port_rcv_packets = cntrs.port_rcv_packets;
1640         idev->z_local_link_integrity_errors =
1641                 cntrs.local_link_integrity_errors;
1642         idev->z_excessive_buffer_overrun_errors =
1643                 cntrs.excessive_buffer_overrun_errors;
1644         idev->z_vl15_dropped = cntrs.vl15_dropped;
1645
1646         /*
1647          * The system image GUID is supposed to be the same for all
1648          * IB HCAs in a single system but since there can be other
1649          * device types in the system, we can't be sure this is unique.
1650          */
1651         if (!sys_image_guid)
1652                 sys_image_guid = dd->ipath_guid;
1653         idev->sys_image_guid = sys_image_guid;
1654         idev->ib_unit = dd->ipath_unit;
1655         idev->dd = dd;
1656
1657         strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
1658         dev->owner = THIS_MODULE;
1659         dev->node_guid = dd->ipath_guid;
1660         dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
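        /*
         * Advertise the userspace verbs commands this driver supports;
         * the uverbs layer only dispatches commands whose bit is set here.
         */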
1661         dev->uverbs_cmd_mask =
1662                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
1663                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
1664                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
1665                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
1666                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
1667                 (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
1668                 (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
1669                 (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
1670                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
1671                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
1672                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1673                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
1674                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
1675                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
1676                 (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
1677                 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
1678                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
1679                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
1680                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
1681                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
1682                 (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
1683                 (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
1684                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
1685                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
1686                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
1687                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
1688                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
1689                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
1690                 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
1691         dev->node_type = RDMA_NODE_IB_CA;
1692         dev->phys_port_cnt = 1;
1693         dev->num_comp_vectors = 1;
1694         dev->dma_device = &dd->pcidev->dev;
1695         dev->query_device = ipath_query_device;
1696         dev->modify_device = ipath_modify_device;
1697         dev->query_port = ipath_query_port;
1698         dev->modify_port = ipath_modify_port;
1699         dev->query_pkey = ipath_query_pkey;
1700         dev->query_gid = ipath_query_gid;
1701         dev->alloc_ucontext = ipath_alloc_ucontext;
1702         dev->dealloc_ucontext = ipath_dealloc_ucontext;
1703         dev->alloc_pd = ipath_alloc_pd;
1704         dev->dealloc_pd = ipath_dealloc_pd;
1705         dev->create_ah = ipath_create_ah;
1706         dev->destroy_ah = ipath_destroy_ah;
1707         dev->query_ah = ipath_query_ah;
1708         dev->create_srq = ipath_create_srq;
1709         dev->modify_srq = ipath_modify_srq;
1710         dev->query_srq = ipath_query_srq;
1711         dev->destroy_srq = ipath_destroy_srq;
1712         dev->create_qp = ipath_create_qp;
1713         dev->modify_qp = ipath_modify_qp;
1714         dev->query_qp = ipath_query_qp;
1715         dev->destroy_qp = ipath_destroy_qp;
1716         dev->post_send = ipath_post_send;
1717         dev->post_recv = ipath_post_receive;
1718         dev->post_srq_recv = ipath_post_srq_receive;
1719         dev->create_cq = ipath_create_cq;
1720         dev->destroy_cq = ipath_destroy_cq;
1721         dev->resize_cq = ipath_resize_cq;
1722         dev->poll_cq = ipath_poll_cq;
1723         dev->req_notify_cq = ipath_req_notify_cq;
1724         dev->get_dma_mr = ipath_get_dma_mr;
1725         dev->reg_phys_mr = ipath_reg_phys_mr;
1726         dev->reg_user_mr = ipath_reg_user_mr;
1727         dev->dereg_mr = ipath_dereg_mr;
1728         dev->alloc_fmr = ipath_alloc_fmr;
1729         dev->map_phys_fmr = ipath_map_phys_fmr;
1730         dev->unmap_fmr = ipath_unmap_fmr;
1731         dev->dealloc_fmr = ipath_dealloc_fmr;
1732         dev->attach_mcast = ipath_multicast_attach;
1733         dev->detach_mcast = ipath_multicast_detach;
1734         dev->process_mad = ipath_process_mad;
1735         dev->mmap = ipath_mmap;
1736         dev->dma_ops = &ipath_dma_mapping_ops;
1737
1738         snprintf(dev->node_desc, sizeof(dev->node_desc),
1739                  IPATH_IDSTR " %s", init_utsname()->nodename);
1740
1741         ret = ib_register_device(dev);
1742         if (ret)
1743                 goto err_reg;
1744
1745         ret = ipath_verbs_register_sysfs(dev);
1746         if (ret)
                goto err_class;
1747
1748         enable_timer(dd);
1749
1750         goto bail;
1751
1752 err_class:
1753         ib_unregister_device(dev);
1754 err_reg:
1755         kfree(idev->lk_table.table);
1756 err_lk:
1757         kfree(idev->qp_table.table);
1758 err_qp:
1759         ib_dealloc_device(dev);
1760         ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1761         idev = NULL;
1762
1763 bail:
1764         dd->verbs_dev = idev;
1765         return ret;
1766 }
1767
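/**
 * ipath_unregister_ib_device - unregister our device from the infiniband core
 * @dev: the verbs device allocated by ipath_register_ib_device()
 *
 * Stops the verbs timer, unregisters from the IB core, warns if any pending
 * work or multicast group state remains, then frees the QP and LKEY tables
 * and the ib_device itself.
 */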
1768 void ipath_unregister_ib_device(struct ipath_ibdev *dev)
1769 {
1770         struct ib_device *ibdev = &dev->ibdev;
1771
1772         disable_timer(dev->dd);
1773
1774         ib_unregister_device(ibdev);
1775
1776         if (!list_empty(&dev->pending[0]) ||
1777             !list_empty(&dev->pending[1]) ||
1778             !list_empty(&dev->pending[2]))
1779                 ipath_dev_err(dev->dd, "pending list not empty!\n");
1780         if (!list_empty(&dev->piowait))
1781                 ipath_dev_err(dev->dd, "piowait list not empty!\n");
1782         if (!list_empty(&dev->rnrwait))
1783                 ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
1784         if (!ipath_mcast_tree_empty())
1785                 ipath_dev_err(dev->dd, "multicast table memory leak!\n");
1786         /*
1787          * Note that ipath_unregister_ib_device() can be called before all
1788          * the QPs are destroyed!
1789          */
1790         ipath_free_all_qps(&dev->qp_table);
1791         kfree(dev->qp_table.table);
1792         kfree(dev->lk_table.table);
1793         ib_dealloc_device(ibdev);
1794 }
1795
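/*
 * sysfs attributes for the IB class device (typically visible under
 * /sys/class/infiniband/ipath<n>/); registered by
 * ipath_verbs_register_sysfs() below.
 */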
1796 static ssize_t show_rev(struct class_device *cdev, char *buf)
1797 {
1798         struct ipath_ibdev *dev =
1799                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1800
1801         return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
1802 }
1803
1804 static ssize_t show_hca(struct class_device *cdev, char *buf)
1805 {
1806         struct ipath_ibdev *dev =
1807                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1808         int ret;
1809
1810         ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
1811         if (ret < 0)
1812                 goto bail;
1813         strcat(buf, "\n");
1814         ret = strlen(buf);
1815
1816 bail:
1817         return ret;
1818 }
1819
1820 static ssize_t show_stats(struct class_device *cdev, char *buf)
1821 {
1822         struct ipath_ibdev *dev =
1823                 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1824         int i;
1825         int len;
1826
1827         len = sprintf(buf,
1828                       "RC resends  %d\n"
1829                       "RC no QACK  %d\n"
1830                       "RC ACKs     %d\n"
1831                       "RC SEQ NAKs %d\n"
1832                       "RC RDMA seq %d\n"
1833                       "RC RNR NAKs %d\n"
1834                       "RC OTH NAKs %d\n"
1835                       "RC timeouts %d\n"
1836                       "RC RDMA dup %d\n"
1837                       "RC stalls   %d\n"
1838                       "piobuf wait %d\n"
1839                       "no piobuf   %d\n"
1840                       "PKT drops   %d\n"
1841                       "WQE errs    %d\n",
1842                       dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1843                       dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1844                       dev->n_other_naks, dev->n_timeouts,
1845                       dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
1846                       dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
1847         for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1848                 const struct ipath_opcode_stats *si = &dev->opstats[i];
1849
1850                 if (!si->n_packets && !si->n_bytes)
1851                         continue;
1852                 len += sprintf(buf + len, "%02x %llu/%llu\n", i,
1853                                (unsigned long long) si->n_packets,
1854                                (unsigned long long) si->n_bytes);
1855         }
1856         return len;
1857 }
1858
1859 static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1860 static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1861 static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
1862 static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
1863
1864 static struct class_device_attribute *ipath_class_attributes[] = {
1865         &class_device_attr_hw_rev,
1866         &class_device_attr_hca_type,
1867         &class_device_attr_board_id,
1868         &class_device_attr_stats
1869 };
1870
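/**
 * ipath_verbs_register_sysfs - create the sysfs files for an IB device
 * @dev: the IB device whose class device receives the attributes
 *
 * Returns 0 on success or a negative errno if creating any of the
 * attribute files fails.
 */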
1871 static int ipath_verbs_register_sysfs(struct ib_device *dev)
1872 {
1873         int i;
1874         int ret;
1875
1876         for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
1877                 ret = class_device_create_file(&dev->class_dev,
1878                                                ipath_class_attributes[i]);
1879                 if (ret)
1880                         goto bail;
1881         }
1882
1883         ret = 0;
1884
1885 bail:
1886         return ret;
1887 }