/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/utsname.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
unsigned int ib_ipath_lkey_table_size = 12;
module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
static unsigned int ib_ipath_max_pds = 0xFFFF;
module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_ipath_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_ipath_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_ipath_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
unsigned int ib_ipath_max_sges = 0x60;
module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
unsigned int ib_ipath_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");
unsigned int ib_ipath_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_ipath_max_srqs = 1024;
module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_ipath_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
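
/*
 * Example (hypothetical values): these limits can be set at module load
 * time, and the S_IWUSR ones can also be changed afterwards through sysfs:
 *
 *	modprobe ib_ipath qp_table_size=521 max_srqs=2048
 *	echo 4096 > /sys/module/ib_ipath/parameters/max_srqs
 */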
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
	[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
	[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
	[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK,
	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
	[IB_QPS_ERR] = 0,
};
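
/*
 * Usage sketch: the posting and receive paths below gate themselves on
 * this table instead of open-coding the QP state machine, e.g. a QP in
 * RTR may accept receive work requests but not send ones:
 *
 *	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))
 *		return -EINVAL;
 */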
struct ipath_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
						  *ibucontext)
{
	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
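
/*
 * Example: when a send WQE completes, the completion code fills in the
 * ib_wc entry from the original work request, roughly:
 *
 *	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
 *
 * so IB_WR_RDMA_WRITE_WITH_IMM reports as IB_WC_RDMA_WRITE.
 */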
/*
 * System image GUID.
 */
static __be64 sys_image_guid;
/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
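
/*
 * Usage sketch (hypothetical caller, not a code path in this file):
 * scatter a received payload into the SGE list of a receive WQE,
 * assuming ss was pointed at the WQE's sg_list beforehand:
 *
 *	struct ipath_sge_state ss;
 *	// ... set ss.sge, ss.sg_list and ss.num_sge from wqe->sg_list ...
 *	ipath_copy_sge(&ss, payload, payload_len);
 */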
/**
 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/**
 * ipath_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			   struct ib_send_wr **bad_wr)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	int err = 0;

	/* Check that state is OK to post send. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		switch (qp->ibqp.qp_type) {
		case IB_QPT_UC:
		case IB_QPT_RC:
			err = ipath_post_ruc_send(qp, wr);
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			err = ipath_post_ud_send(qp, wr);
			break;

		default:
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
	}

bail:
	return err;
}
/**
 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
/**
 * ipath_qp_rcv - process an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
			 struct ipath_ib_header *hdr, int has_grh,
			 void *data, u32 tlen, struct ipath_qp *qp)
{
	/* Check for valid receive state. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		return;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}
}
/**
 * ipath_ib_rcv - process an incoming packet
 * @dev: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level.  Tlen is the length of the header + data + CRC in bytes.
 */
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
		  u32 tlen)
{
	struct ipath_ib_header *hdr = rhdr;
	struct ipath_other_headers *ohdr;
	struct ipath_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	if (unlikely(dev == NULL))
		goto bail;

	if (unlikely(tlen < 24)) {	/* LRH+BTH+CRC */
		dev->rcv_errors++;
		goto bail;
	}

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < IPATH_MULTICAST_LID_BASE) {
		lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		if (unlikely(lid != dev->dd->ipath_lid)) {
			dev->rcv_errors++;
			goto bail;
		}
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == IPATH_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == IPATH_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else {
		dev->rcv_errors++;
		goto bail;
	}

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	dev->opstats[opcode].n_bytes += tlen;
	dev->opstats[opcode].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
	if (qp_num == IPATH_MULTICAST_QPN) {
		struct ipath_mcast *mcast;
		struct ipath_mcast_qp *p;

		mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
		if (mcast == NULL) {
			dev->n_pkt_drops++;
			goto bail;
		}
		dev->n_multicast_rcv++;
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
				     tlen, p->qp);
		/*
		 * Notify ipath_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
		if (qp) {
			dev->n_unicast_rcv++;
			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
				     tlen, qp);
			/*
			 * Notify ipath_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} else
			dev->n_pkt_drops++;
	}

bail:;
}
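
/*
 * Worked example: the low two bits of lrh[0] are the IB LNH field, so a
 * value of IPATH_LRH_BTH (local route header only) selects hdr->u.oth,
 * while IPATH_LRH_GRH (global) selects hdr->u.l.oth, which sits past the
 * 40-byte GRH.
 */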
/**
 * ipath_ib_timer - verbs timer
 * @dev: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 */
void ipath_ib_timer(struct ipath_ibdev *dev)
{
	struct ipath_qp *resend = NULL;
	struct list_head *last;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		return;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* Start filling the next pending queue. */
	if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
		dev->pending_index = 0;
	/* Save any requests still in the new queue, they have timed out. */
	last = &dev->pending[dev->pending_index];
	while (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		list_del_init(&qp->timerwait);
		qp->timer_next = resend;
		resend = qp;
		atomic_inc(&qp->refcount);
	}
	last = &dev->rnrwait;
	if (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		if (--qp->s_rnr_timeout == 0) {
			do {
				list_del_init(&qp->timerwait);
				tasklet_hi_schedule(&qp->s_task);
				if (list_empty(last))
					break;
				qp = list_entry(last->next, struct ipath_qp,
						timerwait);
			} while (qp->s_rnr_timeout == 0);
		}
	}
	/*
	 * We should only be in the started state if pma_sample_start != 0
	 */
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
	    --dev->pma_sample_start == 0) {
		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
					&dev->ipath_rword,
					&dev->ipath_spkts,
					&dev->ipath_rpkts,
					&dev->ipath_xmit_wait);
	}
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
		if (dev->pma_sample_interval == 0) {
			u64 ta, tb, tc, td, te;

			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
			ipath_snapshot_counters(dev->dd, &ta, &tb,
						&tc, &td, &te);

			dev->ipath_sword = ta - dev->ipath_sword;
			dev->ipath_rword = tb - dev->ipath_rword;
			dev->ipath_spkts = tc - dev->ipath_spkts;
			dev->ipath_rpkts = td - dev->ipath_rpkts;
			dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
		} else
			dev->pma_sample_interval--;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/* XXX What if timer fires again while this is running? */
	for (qp = resend; qp != NULL; qp = qp->timer_next) {
		struct ib_wc wc;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
			dev->n_timeouts++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
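
/*
 * Worked example (hypothetical numbers): with sge->length == 4096 and
 * sge->sge_length == 8192, update_sge(ss, 4096) advances vaddr by 4096
 * and, because the current MR segment is exhausted while the ib_sge is
 * not, steps to the next IPATH_SEGSZ segment of the region's map.
 */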
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
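
/*
 * Worked example (little-endian): clear_upper_bytes(0xAABBCCDD, 2, 0)
 * shifts left 16 then right 16 and yields 0x0000CCDD: only the low
 * n == 2 payload bytes survive, placed off == 0 bytes into the word.
 * With off == 1 the result is 0x00CCDD00.
 */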
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
}
/**
 * ipath_verbs_send - send a packet
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
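	/*
	 * Worked example (hypothetical sizes): hdrwords == 13 header
	 * dwords and len == 256 payload bytes give (256 + 3) >> 2 == 64
	 * payload dwords, so plen == 13 + 64 + 1 == 78 dwords including
	 * the PBC qword.
	 */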
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, addr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(addr[w - 1], piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}
/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */

	ret = 0;

bail:
	return ret;
}
/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @dev: the device pointer
 *
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available.  Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
 * return zero).
 */
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		goto bail;

	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(&dev->piowait)) {
		qp = list_entry(dev->piowait.next, struct ipath_qp,
				piowait);
		list_del_init(&qp->piowait);
		tasklet_hi_schedule(&qp->s_task);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

bail:
	return 0;
}
static int ipath_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id = dev->dd->ipath_vendorid;
	props->vendor_part_id = dev->dd->ipath_deviceid;
	props->hw_ver = dev->dd->ipath_pcirev;

	props->sys_image_guid = dev->sys_image_guid;

	props->max_mr_size = ~0ull;
	props->max_qp = dev->qp_table.max;
	props->max_qp_wr = ib_ipath_max_qp_wrs;
	props->max_sge = ib_ipath_max_sges;
	props->max_cq = ib_ipath_max_cqs;
	props->max_ah = ib_ipath_max_ahs;
	props->max_cqe = ib_ipath_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_pd = ib_ipath_max_pds;
	props->max_qp_rd_atom = 1;
	props->max_qp_init_rd_atom = 1;
	/* props->max_res_rd_atom */
	props->max_srq = ib_ipath_max_srqs;
	props->max_srq_wr = ib_ipath_max_srq_wrs;
	props->max_srq_sge = ib_ipath_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_HCA;
	props->max_pkeys = ipath_get_npkeys(dev->dd);
	props->max_mcast_grp = ib_ipath_max_mcast_grps;
	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
const u8 ipath_cvt_physportstate[16] = {
	[INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
	[INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
};
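
/*
 * The values on the right are the IB PortPhysicalState codes reported in
 * PortInfo: 1 Sleep, 2 Polling, 3 Disabled, 4 PortConfigurationTraining,
 * 5 LinkUp, 6 LinkErrorRecovery.  For example, the chip's LINKUP
 * link-training state maps to 5 (LinkUp).
 */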
u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}
static int ipath_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	enum ib_mtu mtu;
	u16 lid = dev->dd->ipath_lid;
	u64 ibcstat;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = dev->mkeyprot_resv_lmc & 7;
	props->sm_lid = dev->sm_lid;
	props->sm_sl = dev->sm_sl;
	ibcstat = dev->dd->ipath_lastibcstat;
	props->state = ((ibcstat >> 4) & 0x3) + 1;
	/* See phys_state_show() */
	props->phys_state = ipath_cvt_physportstate[
		dev->dd->ipath_lastibcstat & 0xf];
	props->port_cap_flags = dev->port_cap_flags;
	props->gid_tbl_len = 1;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
	props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
		dev->z_pkey_violations;
	props->qkey_viol_cntr = dev->qkey_violations;
	props->active_width = IB_WIDTH_4X;
	/* See rate_show() */
	props->active_speed = 1;	/* Regular 10Gbs speed. */
	props->max_vl_num = 1;		/* VLCap = VL0 */
	props->init_type_reply = 0;

	props->max_mtu = IB_MTU_4096;
	switch (dev->dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = dev->subnet_timeout;

	return 0;
}
static int ipath_modify_device(struct ib_device *device,
			       int device_modify_mask,
			       struct ib_device_modify *device_modify)
{
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(device->node_desc, device_modify->node_desc, 64);

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		to_idev(device)->sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);

	ret = 0;

bail:
	return ret;
}
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	dev->port_cap_flags |= props->set_port_cap_mask;
	dev->port_cap_flags &= ~props->clr_port_cap_mask;
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		dev->qkey_violations = 0;

	return 0;
}
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= 1) {
		ret = -EINVAL;
		goto bail;
	}
	gid->global.subnet_prefix = dev->gid_prefix;
	gid->global.interface_id = dev->dd->ipath_guid;

	ret = 0;

bail:
	return ret;
}
static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_ipath_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}
static int ipath_dealloc_pd(struct ib_pd *ibpd)
{
	struct ipath_pd *pd = to_ipd(ibpd);
	struct ipath_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}
/**
 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah;
	struct ib_ah *ret;
	struct ipath_ibdev *dev = to_idev(pd->device);

	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->dlid == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > pd->device->phys_port_cnt) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (ah == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_ahs_lock);
	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
		spin_unlock(&dev->n_ahs_lock);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock(&dev->n_ahs_lock);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;

	ret = &ah->ibah;

bail:
	return ret;
}
/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
	struct ipath_ibdev *dev = to_idev(ibah->device);
	struct ipath_ah *ah = to_iah(ibah);

	spin_lock(&dev->n_ahs_lock);
	dev->n_ahs_allocated--;
	spin_unlock(&dev->n_ahs_lock);

	kfree(ah);

	return 0;
}
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}
/**
 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}
/**
 * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= ipath_get_npkeys(dev->dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = ipath_get_pkey(dev->dd, index);
	ret = 0;

bail:
	return ret;
}
/**
 * ipath_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct ipath_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}
static int ipath_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static int ipath_verbs_register_sysfs(struct ib_device *dev);
static void __verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	ipath_ib_timer(dd->verbs_dev);

	mod_timer(&dd->verbs_timer, jiffies + 1);
}
static int enable_timer(struct ipath_devdata *dd)
{
	/*
	 * Early chips had a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_timer);
	dd->verbs_timer.function = __verbs_timer;
	dd->verbs_timer.data = (unsigned long)dd;
	dd->verbs_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_timer);

	return 0;
}
static int disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_timer);

	return 0;
}
/**
 * ipath_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated ipath_ibdev pointer or NULL on error.
 */
int ipath_register_ib_device(struct ipath_devdata *dd)
{
	struct ipath_verbs_counters cntrs;
	struct ipath_ibdev *idev;
	struct ib_device *dev;
	int ret;

	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
	if (idev == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	dev = &idev->ibdev;
	/* Only need to initialize non-zero fields. */
	spin_lock_init(&idev->n_pds_lock);
	spin_lock_init(&idev->n_ahs_lock);
	spin_lock_init(&idev->n_cqs_lock);
	spin_lock_init(&idev->n_srqs_lock);
	spin_lock_init(&idev->n_mcast_grps_lock);

	spin_lock_init(&idev->qp_table.lock);
	spin_lock_init(&idev->lk_table.lock);
	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
	if (ret)
		goto err_qp;

	/*
	 * The top ib_ipath_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
	idev->lk_table.table = kzalloc(idev->lk_table.max *
				       sizeof(*idev->lk_table.table),
				       GFP_KERNEL);
	if (idev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
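	/*
	 * Worked example: the default lkey_table_size of 12 gives
	 * 1 << 12 == 4096 table slots, so an LKEY's top 12 bits select a
	 * slot, its low 8 bits are caller-owned, and the bits in between
	 * tag reuse of a slot.
	 */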
	spin_lock_init(&idev->pending_lock);
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
	INIT_LIST_HEAD(&idev->piowait);
	INIT_LIST_HEAD(&idev->rnrwait);
	idev->pending_index = 0;
	idev->port_cap_flags =
		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
	idev->link_width_enabled = 3;	/* 1x or 4x */
	/* Snapshot current HW counters to "clear" them. */
	ipath_get_counters(dd, &cntrs);
	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
	idev->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	idev->z_link_downed_counter = cntrs.link_downed_counter;
	idev->z_port_rcv_errors = cntrs.port_rcv_errors;
	idev->z_port_rcv_remphys_errors =
		cntrs.port_rcv_remphys_errors;
	idev->z_port_xmit_discards = cntrs.port_xmit_discards;
	idev->z_port_xmit_data = cntrs.port_xmit_data;
	idev->z_port_rcv_data = cntrs.port_rcv_data;
	idev->z_port_xmit_packets = cntrs.port_xmit_packets;
	idev->z_port_rcv_packets = cntrs.port_rcv_packets;
	idev->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	idev->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!sys_image_guid)
		sys_image_guid = dd->ipath_guid;
	idev->sys_image_guid = sys_image_guid;
	idev->ib_unit = dd->ipath_unit;
	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
	dev->owner = THIS_MODULE;
	dev->node_guid = dd->ipath_guid;
	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	dev->node_type = IB_NODE_CA;
	dev->phys_port_cnt = 1;
	dev->dma_device = &dd->pcidev->dev;
	dev->class_dev.dev = dev->dma_device;
	dev->query_device = ipath_query_device;
	dev->modify_device = ipath_modify_device;
	dev->query_port = ipath_query_port;
	dev->modify_port = ipath_modify_port;
	dev->query_pkey = ipath_query_pkey;
	dev->query_gid = ipath_query_gid;
	dev->alloc_ucontext = ipath_alloc_ucontext;
	dev->dealloc_ucontext = ipath_dealloc_ucontext;
	dev->alloc_pd = ipath_alloc_pd;
	dev->dealloc_pd = ipath_dealloc_pd;
	dev->create_ah = ipath_create_ah;
	dev->destroy_ah = ipath_destroy_ah;
	dev->query_ah = ipath_query_ah;
	dev->create_srq = ipath_create_srq;
	dev->modify_srq = ipath_modify_srq;
	dev->query_srq = ipath_query_srq;
	dev->destroy_srq = ipath_destroy_srq;
	dev->create_qp = ipath_create_qp;
	dev->modify_qp = ipath_modify_qp;
	dev->query_qp = ipath_query_qp;
	dev->destroy_qp = ipath_destroy_qp;
	dev->post_send = ipath_post_send;
	dev->post_recv = ipath_post_receive;
	dev->post_srq_recv = ipath_post_srq_receive;
	dev->create_cq = ipath_create_cq;
	dev->destroy_cq = ipath_destroy_cq;
	dev->resize_cq = ipath_resize_cq;
	dev->poll_cq = ipath_poll_cq;
	dev->req_notify_cq = ipath_req_notify_cq;
	dev->get_dma_mr = ipath_get_dma_mr;
	dev->reg_phys_mr = ipath_reg_phys_mr;
	dev->reg_user_mr = ipath_reg_user_mr;
	dev->dereg_mr = ipath_dereg_mr;
	dev->alloc_fmr = ipath_alloc_fmr;
	dev->map_phys_fmr = ipath_map_phys_fmr;
	dev->unmap_fmr = ipath_unmap_fmr;
	dev->dealloc_fmr = ipath_dealloc_fmr;
	dev->attach_mcast = ipath_multicast_attach;
	dev->detach_mcast = ipath_multicast_detach;
	dev->process_mad = ipath_process_mad;
	dev->mmap = ipath_mmap;

	snprintf(dev->node_desc, sizeof(dev->node_desc),
		 IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
	ret = ib_register_device(dev);
	if (ret)
		goto err_reg;

	if (ipath_verbs_register_sysfs(dev))
		goto err_class;

	enable_timer(dd);

	goto bail;

err_class:
	ib_unregister_device(dev);
err_reg:
	kfree(idev->lk_table.table);
err_lk:
	kfree(idev->qp_table.table);
err_qp:
	ib_dealloc_device(dev);
	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	idev = NULL;

bail:
	dd->verbs_dev = idev;
	return ret;
}
void ipath_unregister_ib_device(struct ipath_ibdev *dev)
{
	struct ib_device *ibdev = &dev->ibdev;

	disable_timer(dev->dd);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->pending[0]) ||
	    !list_empty(&dev->pending[1]) ||
	    !list_empty(&dev->pending[2]))
		ipath_dev_err(dev->dd, "pending list not empty!\n");
	if (!list_empty(&dev->piowait))
		ipath_dev_err(dev->dd, "piowait list not empty!\n");
	if (!list_empty(&dev->rnrwait))
		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
	if (!ipath_mcast_tree_empty())
		ipath_dev_err(dev->dd, "multicast table memory leak!\n");

	/*
	 * Note that ipath_unregister_ib_device() can be called before all
	 * the QPs are destroyed!
	 */
	ipath_free_all_qps(&dev->qp_table);
	kfree(dev->qp_table.table);
	kfree(dev->lk_table.table);
	ib_dealloc_device(ibdev);
}
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct ipath_ibdev *dev =
		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);

	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
}
static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct ipath_ibdev *dev =
		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
	int ret;

	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
	if (ret < 0)
		goto bail;
	strcat(buf, "\n");
	ret = strlen(buf);

bail:
	return ret;
}
static ssize_t show_stats(struct class_device *cdev, char *buf)
{
	struct ipath_ibdev *dev =
		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
	int i;
	int len;

	len = sprintf(buf,
		      "RC resends  %d\n"
		      "RC no QACK  %d\n"
		      "RC ACKs     %d\n"
		      "RC SEQ NAKs %d\n"
		      "RC RDMA seq %d\n"
		      "RC RNR NAKs %d\n"
		      "RC OTH NAKs %d\n"
		      "RC timeouts %d\n"
		      "RC RDMA dup %d\n"
		      "piobuf wait %d\n"
		      "no piobuf   %d\n"
		      "PKT drops   %d\n"
		      "WQE errs    %d\n",
		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
		      dev->n_other_naks, dev->n_timeouts,
		      dev->n_rdma_dup_busy, dev->n_piowait,
		      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
		const struct ipath_opcode_stats *si = &dev->opstats[i];

		if (!si->n_packets && !si->n_bytes)
			continue;
		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
			       (unsigned long long) si->n_packets,
			       (unsigned long long) si->n_bytes);
	}
	return len;
}
static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);

static struct class_device_attribute *ipath_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_hca_type,
	&class_device_attr_board_id,
	&class_device_attr_stats
};
static int ipath_verbs_register_sysfs(struct ib_device *dev)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
		if (class_device_create_file(&dev->class_dev,
					     ipath_class_attributes[i])) {
			ret = 1;
			goto bail;
		}

	ret = 0;

bail:
	return ret;
}