/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>

#include "qib.h"
#include "qib_common.h"
static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_qib_lkey_table_size = 16;
module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
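/*
 * The limits above are module parameters and can be tuned at load time,
 * for example (hypothetical values):
 *
 *     modprobe ib_qib qp_table_size=512 lkey_table_size=17 max_qps=32768
 */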
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
__be64 ib_qib_sys_image_guid;
/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
{
253 struct qib_sge *sg_list = ss->sg_list;
254 struct qib_sge sge = ss->sge;
255 u8 num_sge = ss->num_sge;
256 u32 ndesc = 1; /* count the header */
259 u32 len = sge.length;
263 if (len > sge.sge_length)
264 len = sge.sge_length;
266 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
267 (len != length && (len & (sizeof(u32) - 1)))) {
274 sge.sge_length -= len;
275 if (sge.sge_length == 0) {
278 } else if (sge.length == 0 && sge.mr->lkey) {
279 if (++sge.n >= QIB_SEGSZ) {
280 if (++sge.m >= sge.mr->mapsz)
285 sge.mr->map[sge.m]->segs[sge.n].vaddr;
287 sge.mr->map[sge.m]->segs[sge.n].length;
/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;
302 u32 len = sge->length;
306 if (len > sge->sge_length)
307 len = sge->sge_length;
309 memcpy(data, sge->vaddr, len);
312 sge->sge_length -= len;
313 if (sge->sge_length == 0) {
315 *sge = *ss->sg_list++;
316 } else if (sge->length == 0 && sge->mr->lkey) {
317 if (++sge->n >= QIB_SEGSZ) {
318 if (++sge->m >= sge->mr->mapsz)
323 sge->mr->map[sge->m]->segs[sge->n].vaddr;
325 sge->mr->map[sge->m]->segs[sge->n].length;
/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_swqe *wqe;
339 struct qib_swqe *wqe;
346 struct qib_lkey_table *rkt;
349 spin_lock_irqsave(&qp->s_lock, flags);
351 /* Check that state is OK to post send. */
352 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
355 /* IB spec says that num_sge == 0 is OK. */
356 if (wr->num_sge > qp->s_max_sge)
360 * Don't allow RDMA reads or atomic operations on UC or
361 * undefined operations.
362 * Make sure buffer is large enough to hold the result for atomics.
364 if (wr->opcode == IB_WR_FAST_REG_MR) {
365 if (qib_fast_reg_mr(qp, wr))
367 } else if (qp->ibqp.qp_type == IB_QPT_UC) {
368 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
370 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
371 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
372 if (wr->opcode != IB_WR_SEND &&
373 wr->opcode != IB_WR_SEND_WITH_IMM)
375 /* Check UD destination address PD */
376 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
378 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
380 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
382 wr->sg_list[0].length < sizeof(u64) ||
383 wr->sg_list[0].addr & (sizeof(u64) - 1)))
385 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
388 next = qp->s_head + 1;
389 if (next >= qp->s_size)
391 if (next == qp->s_last) {
396 rkt = &to_idev(qp->ibqp.device)->lk_table;
397 pd = to_ipd(qp->ibqp.pd);
398 wqe = get_swqe_ptr(qp, qp->s_head);
403 acc = wr->opcode >= IB_WR_RDMA_READ ?
404 IB_ACCESS_LOCAL_WRITE : 0;
405 for (i = 0; i < wr->num_sge; i++) {
406 u32 length = wr->sg_list[i].length;
411 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
412 &wr->sg_list[i], acc);
414 goto bail_inval_free;
415 wqe->length += length;
420 if (qp->ibqp.qp_type == IB_QPT_UC ||
421 qp->ibqp.qp_type == IB_QPT_RC) {
422 if (wqe->length > 0x80000000U)
423 goto bail_inval_free;
424 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
425 qp->port_num - 1)->ibmtu)
426 goto bail_inval_free;
428 atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
429 wqe->ssn = qp->s_ssn++;
437 struct qib_sge *sge = &wqe->sg_list[--j];
439 atomic_dec(&sge->mr->refcount);
444 spin_unlock_irqrestore(&qp->s_lock, flags);
/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
459 struct qib_qp *qp = to_iqp(ibqp);
462 for (; wr; wr = wr->next) {
463 err = qib_post_one_send(qp, wr);
470 /* Try to do the send work in the caller's context. */
471 qib_do_send(&qp->s_work);
/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
488 struct qib_qp *qp = to_iqp(ibqp);
489 struct qib_rwq *wq = qp->r_rq.wq;
493 /* Check that state is OK to post receive. */
494 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
500 for (; wr; wr = wr->next) {
501 struct qib_rwqe *wqe;
505 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
511 spin_lock_irqsave(&qp->r_rq.lock, flags);
513 if (next >= qp->r_rq.size)
515 if (next == wq->tail) {
516 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
522 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
523 wqe->wr_id = wr->wr_id;
524 wqe->num_sge = wr->num_sge;
525 for (i = 0; i < wr->num_sge; i++)
526 wqe->sg_list[i] = wr->sg_list[i];
527 /* Make sure queue entry is written before the head index. */
530 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
/**
 * qib_qp_rcv - processing an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
554 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
556 spin_lock(&qp->r_lock);
558 /* Check for valid receive state. */
559 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
564 switch (qp->ibqp.qp_type) {
567 if (ib_qib_disable_sma)
571 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
575 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
579 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
587 spin_unlock(&qp->r_lock);
/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
602 struct qib_pportdata *ppd = rcd->ppd;
603 struct qib_ibport *ibp = &ppd->ibport_data;
604 struct qib_ib_header *hdr = rhdr;
605 struct qib_other_headers *ohdr;
612 /* 24 == LRH+BTH+CRC */
613 if (unlikely(tlen < 24))
616 /* Check for a valid destination LID (see ch. 7.11.1). */
617 lid = be16_to_cpu(hdr->lrh[1]);
618 if (lid < QIB_MULTICAST_LID_BASE) {
619 lid &= ~((1 << ppd->lmc) - 1);
620 if (unlikely(lid != ppd->lid))
625 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
626 if (lnh == QIB_LRH_BTH)
628 else if (lnh == QIB_LRH_GRH) {
631 ohdr = &hdr->u.l.oth;
632 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
634 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
635 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
640 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
641 ibp->opstats[opcode & 0x7f].n_bytes += tlen;
642 ibp->opstats[opcode & 0x7f].n_packets++;
644 /* Get the destination QP number. */
645 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
646 if (qp_num == QIB_MULTICAST_QPN) {
647 struct qib_mcast *mcast;
648 struct qib_mcast_qp *p;
650 if (lnh != QIB_LRH_GRH)
652 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
655 ibp->n_multicast_rcv++;
656 list_for_each_entry_rcu(p, &mcast->qp_list, list)
657 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
659 * Notify qib_multicast_detach() if it is waiting for us
662 if (atomic_dec_return(&mcast->refcount) <= 1)
663 wake_up(&mcast->wait);
665 if (rcd->lookaside_qp) {
666 if (rcd->lookaside_qpn != qp_num) {
667 if (atomic_dec_and_test(
668 &rcd->lookaside_qp->refcount))
670 &rcd->lookaside_qp->wait);
671 rcd->lookaside_qp = NULL;
674 if (!rcd->lookaside_qp) {
675 qp = qib_lookup_qpn(ibp, qp_num);
678 rcd->lookaside_qp = qp;
679 rcd->lookaside_qpn = qp_num;
681 qp = rcd->lookaside_qp;
682 ibp->n_unicast_rcv++;
683 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
697 struct qib_ibdev *dev = (struct qib_ibdev *) data;
698 struct list_head *list = &dev->memwait;
699 struct qib_qp *qp = NULL;
702 spin_lock_irqsave(&dev->pending_lock, flags);
703 if (!list_empty(list)) {
704 qp = list_entry(list->next, struct qib_qp, iowait);
705 list_del_init(&qp->iowait);
706 atomic_inc(&qp->refcount);
707 if (!list_empty(list))
708 mod_timer(&dev->mem_timer, jiffies + 1);
710 spin_unlock_irqrestore(&dev->pending_lock, flags);
713 spin_lock_irqsave(&qp->s_lock, flags);
714 if (qp->s_flags & QIB_S_WAIT_KMEM) {
715 qp->s_flags &= ~QIB_S_WAIT_KMEM;
716 qib_schedule_send(qp);
718 spin_unlock_irqrestore(&qp->s_lock, flags);
719 if (atomic_dec_and_test(&qp->refcount))
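/*
 * update_sge - advance the SGE state by @length bytes, stepping to the
 * next SGE, or to the next segment of the current memory region, once
 * the current one is exhausted.
 */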
724 static void update_sge(struct qib_sge_state *ss, u32 length)
726 struct qib_sge *sge = &ss->sge;
728 sge->vaddr += length;
729 sge->length -= length;
730 sge->sge_length -= length;
731 if (sge->sge_length == 0) {
733 *sge = *ss->sg_list++;
734 } else if (sge->length == 0 && sge->mr->lkey) {
735 if (++sge->n >= QIB_SEGSZ) {
736 if (++sge->m >= sge->mr->mapsz)
740 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
741 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
745 #ifdef __LITTLE_ENDIAN
746 static inline u32 get_upper_bits(u32 data, u32 shift)
748 return data >> shift;
751 static inline u32 set_upper_bits(u32 data, u32 shift)
753 return data << shift;
756 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
758 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
759 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
763 static inline u32 get_upper_bits(u32 data, u32 shift)
765 return data << shift;
768 static inline u32 set_upper_bits(u32 data, u32 shift)
770 return data >> shift;
773 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
775 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
776 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
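/*
 * copy_io - copy payload from an SGE chain into a PIO buffer one 32-bit
 * word at a time, handling unaligned source addresses and, when flush_wc
 * is set, flushing write-combining before and after the trigger word.
 */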
781 static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
782 u32 length, unsigned flush_wc)
789 u32 len = ss->sge.length;
794 if (len > ss->sge.sge_length)
795 len = ss->sge.sge_length;
797 /* If the source address is not aligned, try to align it. */
798 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
800 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
802 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
805 y = sizeof(u32) - off;
808 if (len + extra >= sizeof(u32)) {
809 data |= set_upper_bits(v, extra *
811 len = sizeof(u32) - extra;
816 __raw_writel(data, piobuf);
821 /* Clear unused upper bytes */
822 data |= clear_upper_bytes(v, len, extra);
830 /* Source address is aligned. */
831 u32 *addr = (u32 *) ss->sge.vaddr;
832 int shift = extra * BITS_PER_BYTE;
833 int ushift = 32 - shift;
836 while (l >= sizeof(u32)) {
839 data |= set_upper_bits(v, shift);
840 __raw_writel(data, piobuf);
841 data = get_upper_bits(v, ushift);
847 * We still have 'extra' number of bytes leftover.
852 if (l + extra >= sizeof(u32)) {
853 data |= set_upper_bits(v, shift);
854 len -= l + extra - sizeof(u32);
859 __raw_writel(data, piobuf);
864 /* Clear unused upper bytes */
865 data |= clear_upper_bytes(v, l, extra);
872 } else if (len == length) {
876 } else if (len == length) {
880 * Need to round up for the last dword in the
884 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
886 last = ((u32 *) ss->sge.vaddr)[w - 1];
891 qib_pio_copy(piobuf, ss->sge.vaddr, w);
894 extra = len & (sizeof(u32) - 1);
896 u32 v = ((u32 *) ss->sge.vaddr)[w];
898 /* Clear unused upper bytes */
899 data = clear_upper_bytes(v, extra, 0);
905 /* Update address before sending packet. */
906 update_sge(ss, length);
908 /* must flush early everything before trigger word */
910 __raw_writel(last, piobuf);
911 /* be sure trigger word is written */
914 __raw_writel(last, piobuf);
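/*
 * get_txreq - take a verbs tx request from the free list.  If none are
 * available, the QP is queued on dev->txwait, QIB_S_BUSY is cleared, and
 * an error is returned through *retp so the send is retried later.
 */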
917 static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
918 struct qib_qp *qp, int *retp)
920 struct qib_verbs_txreq *tx;
923 spin_lock_irqsave(&qp->s_lock, flags);
924 spin_lock(&dev->pending_lock);
926 if (!list_empty(&dev->txreq_free)) {
927 struct list_head *l = dev->txreq_free.next;
930 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
933 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
934 list_empty(&qp->iowait)) {
936 qp->s_flags |= QIB_S_WAIT_TX;
937 list_add_tail(&qp->iowait, &dev->txwait);
940 qp->s_flags &= ~QIB_S_BUSY;
944 spin_unlock(&dev->pending_lock);
945 spin_unlock_irqrestore(&qp->s_lock, flags);
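/*
 * qib_put_txreq - return a tx request to the free list, dropping the QP
 * and MR references and any DMA mapping it holds, then wake the first
 * QP waiting on dev->txwait for a free tx request.
 */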
950 void qib_put_txreq(struct qib_verbs_txreq *tx)
952 struct qib_ibdev *dev;
957 dev = to_idev(qp->ibqp.device);
959 if (atomic_dec_and_test(&qp->refcount))
962 atomic_dec(&tx->mr->refcount);
965 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
966 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
967 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
968 tx->txreq.addr, tx->hdr_dwords << 2,
970 kfree(tx->align_buf);
973 spin_lock_irqsave(&dev->pending_lock, flags);
975 /* Put struct back on free list */
976 list_add(&tx->txreq.list, &dev->txreq_free);
978 if (!list_empty(&dev->txwait)) {
979 /* Wake up first QP wanting a free struct */
980 qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
981 list_del_init(&qp->iowait);
982 atomic_inc(&qp->refcount);
983 spin_unlock_irqrestore(&dev->pending_lock, flags);
985 spin_lock_irqsave(&qp->s_lock, flags);
986 if (qp->s_flags & QIB_S_WAIT_TX) {
987 qp->s_flags &= ~QIB_S_WAIT_TX;
988 qib_schedule_send(qp);
990 spin_unlock_irqrestore(&qp->s_lock, flags);
992 if (atomic_dec_and_test(&qp->refcount))
995 spin_unlock_irqrestore(&dev->pending_lock, flags);
999 * This is called when there are send DMA descriptors that might be
1002 * This is called with ppd->sdma_lock held.
1004 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
1006 struct qib_qp *qp, *nqp;
1007 struct qib_qp *qps[20];
1008 struct qib_ibdev *dev;
1012 dev = &ppd->dd->verbs_dev;
1013 spin_lock(&dev->pending_lock);
1015 /* Search wait list for first QP wanting DMA descriptors. */
1016 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
1017 if (qp->port_num != ppd->port)
1019 if (n == ARRAY_SIZE(qps))
1021 if (qp->s_tx->txreq.sg_count > avail)
1023 avail -= qp->s_tx->txreq.sg_count;
1024 list_del_init(&qp->iowait);
1025 atomic_inc(&qp->refcount);
1029 spin_unlock(&dev->pending_lock);
1031 for (i = 0; i < n; i++) {
1033 spin_lock(&qp->s_lock);
1034 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
1035 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
1036 qib_schedule_send(qp);
1038 spin_unlock(&qp->s_lock);
1039 if (atomic_dec_and_test(&qp->refcount))
1045 * This is called with ppd->sdma_lock held.
1047 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1049 struct qib_verbs_txreq *tx =
1050 container_of(cookie, struct qib_verbs_txreq, txreq);
1051 struct qib_qp *qp = tx->qp;
1053 spin_lock(&qp->s_lock);
1055 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1056 else if (qp->ibqp.qp_type == IB_QPT_RC) {
1057 struct qib_ib_header *hdr;
1059 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1060 hdr = &tx->align_buf->hdr;
1062 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1064 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1066 qib_rc_send_complete(qp, hdr);
1068 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1069 if (qp->state == IB_QPS_RESET)
1070 wake_up(&qp->wait_dma);
1071 else if (qp->s_flags & QIB_S_WAIT_DMA) {
1072 qp->s_flags &= ~QIB_S_WAIT_DMA;
1073 qib_schedule_send(qp);
1076 spin_unlock(&qp->s_lock);
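/*
 * wait_kmem - queue the QP on dev->memwait (arming mem_timer if the list
 * was empty) until kernel memory is available again, clearing QIB_S_BUSY
 * so the send is retried once mem_timer wakes the QP.
 */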
1081 static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
1083 unsigned long flags;
1086 spin_lock_irqsave(&qp->s_lock, flags);
1087 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1088 spin_lock(&dev->pending_lock);
1089 if (list_empty(&qp->iowait)) {
1090 if (list_empty(&dev->memwait))
1091 mod_timer(&dev->mem_timer, jiffies + 1);
1092 qp->s_flags |= QIB_S_WAIT_KMEM;
1093 list_add_tail(&qp->iowait, &dev->memwait);
1095 spin_unlock(&dev->pending_lock);
1096 qp->s_flags &= ~QIB_S_BUSY;
1099 spin_unlock_irqrestore(&qp->s_lock, flags);
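/*
 * qib_verbs_send_dma - post a packet via send DMA.  When the payload fits
 * in the available descriptors, the header comes from the pre-mapped
 * pio_hdrs array; otherwise header and payload are copied into a bounce
 * buffer that is mapped for DMA and freed when the send completes.
 */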
1104 static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
1105 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1106 u32 plen, u32 dwords)
1108 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1109 struct qib_devdata *dd = dd_from_dev(dev);
1110 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1111 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1112 struct qib_verbs_txreq *tx;
1113 struct qib_pio_header *phdr;
1121 /* resend previously constructed packet */
1122 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1126 tx = get_txreq(dev, qp, &ret);
1130 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1131 be16_to_cpu(hdr->lrh[0]) >> 12);
1133 atomic_inc(&qp->refcount);
1134 tx->wqe = qp->s_wqe;
1135 tx->mr = qp->s_rdma_mr;
1137 qp->s_rdma_mr = NULL;
1138 tx->txreq.callback = sdma_complete;
1139 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1140 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1142 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1143 if (plen + 1 > dd->piosize2kmax_dwords)
1144 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1148 * Don't try to DMA if it takes more descriptors than
1151 ndesc = qib_count_sge(ss, len);
1152 if (ndesc >= ppd->sdma_descq_cnt)
1157 phdr = &dev->pio_hdrs[tx->hdr_inx];
1158 phdr->pbc[0] = cpu_to_le32(plen);
1159 phdr->pbc[1] = cpu_to_le32(control);
1160 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1161 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1162 tx->txreq.sg_count = ndesc;
1163 tx->txreq.addr = dev->pio_hdrs_phys +
1164 tx->hdr_inx * sizeof(struct qib_pio_header);
1165 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1166 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1170 /* Allocate a buffer and copy the header and payload to it. */
1171 tx->hdr_dwords = plen + 1;
1172 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1175 phdr->pbc[0] = cpu_to_le32(plen);
1176 phdr->pbc[1] = cpu_to_le32(control);
1177 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1178 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1180 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1181 tx->hdr_dwords << 2, DMA_TO_DEVICE);
1182 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1184 tx->align_buf = phdr;
1185 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1186 tx->txreq.sg_count = 1;
1187 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1194 ret = wait_kmem(dev, qp);
1202 * If we are now in the error state, return zero to flush the
1203 * send work request.
1205 static int no_bufs_available(struct qib_qp *qp)
1207 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1208 struct qib_devdata *dd;
1209 unsigned long flags;
1213 * Note that as soon as want_buffer() is called and
1214 * possibly before it returns, qib_ib_piobufavail()
1215 * could be called. Therefore, put QP on the I/O wait list before
1216 * enabling the PIO avail interrupt.
1218 spin_lock_irqsave(&qp->s_lock, flags);
1219 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1220 spin_lock(&dev->pending_lock);
1221 if (list_empty(&qp->iowait)) {
1223 qp->s_flags |= QIB_S_WAIT_PIO;
1224 list_add_tail(&qp->iowait, &dev->piowait);
1225 dd = dd_from_dev(dev);
1226 dd->f_wantpiobuf_intr(dd, 1);
1228 spin_unlock(&dev->pending_lock);
1229 qp->s_flags &= ~QIB_S_BUSY;
1232 spin_unlock_irqrestore(&qp->s_lock, flags);
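/*
 * qib_verbs_send_pio - copy a packet directly into a PIO send buffer:
 * the PBC first, then the header, then the payload, with write-combining
 * flushes around the trigger word when the chip requires them
 * (QIB_PIO_FLUSH_WC).
 */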
1236 static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1237 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1238 u32 plen, u32 dwords)
1240 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1241 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1242 u32 *hdr = (u32 *) ibhdr;
1243 u32 __iomem *piobuf_orig;
1244 u32 __iomem *piobuf;
1246 unsigned long flags;
1251 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1252 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1253 pbc = ((u64) control << 32) | plen;
1254 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1255 if (unlikely(piobuf == NULL))
1256 return no_bufs_available(qp);
1260 * We have to flush after the PBC for correctness on some cpus
1261 * or WC buffer can be written out of order.
1263 writeq(pbc, piobuf);
1264 piobuf_orig = piobuf;
1267 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1270 * If there is just the header portion, must flush before
1271 * writing last word of header for correctness, and after
1272 * the last header word (trigger word).
1276 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1278 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1281 qib_pio_copy(piobuf, hdr, hdrwords);
1287 qib_pio_copy(piobuf, hdr, hdrwords);
1290 /* The common case is aligned and contained in one segment. */
1291 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1292 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1293 u32 *addr = (u32 *) ss->sge.vaddr;
1295 /* Update address before sending packet. */
1296 update_sge(ss, len);
1298 qib_pio_copy(piobuf, addr, dwords - 1);
1299 /* must flush early everything before trigger word */
1301 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1302 /* be sure trigger word is written */
1305 qib_pio_copy(piobuf, addr, dwords);
1308 copy_io(piobuf, ss, len, flush_wc);
1310 if (dd->flags & QIB_USE_SPCL_TRIG) {
1311 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1313 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1315 qib_sendbuf_done(dd, pbufn);
1316 if (qp->s_rdma_mr) {
1317 atomic_dec(&qp->s_rdma_mr->refcount);
1318 qp->s_rdma_mr = NULL;
1321 spin_lock_irqsave(&qp->s_lock, flags);
1322 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1323 spin_unlock_irqrestore(&qp->s_lock, flags);
1324 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1325 spin_lock_irqsave(&qp->s_lock, flags);
1326 qib_rc_send_complete(qp, ibhdr);
1327 spin_unlock_irqrestore(&qp->s_lock, flags);
/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
{
1346 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1349 u32 dwords = (len + 3) >> 2;
1352 * Calculate the send buffer trigger address.
1353 * The +1 counts for the pbc control dword following the pbc length.
1355 plen = hdrwords + dwords + 1;
1358 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1359 * can defer SDMA restart until link goes ACTIVE without
1360 * worrying about just how we got there.
1362 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1363 !(dd->flags & QIB_HAS_SEND_DMA))
1364 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1367 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
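/*
 * qib_snapshot_counters - read the per-port send/receive word and packet
 * counters plus the transmit-wait counter, failing if the hardware is
 * not present (e.g. during a freeze).
 */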
1373 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1374 u64 *rwords, u64 *spkts, u64 *rpkts,
1378 struct qib_devdata *dd = ppd->dd;
1380 if (!(dd->flags & QIB_PRESENT)) {
1381 /* no hardware, freeze, etc. */
1385 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1386 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1387 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1388 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1389 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
/**
 * qib_get_counters - get various chip counters
 * @ppd: the physical port data
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
1409 if (!(ppd->dd->flags & QIB_PRESENT)) {
1410 /* no hardware, freeze, etc. */
1414 cntrs->symbol_error_counter =
1415 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1416 cntrs->link_error_recovery_counter =
1417 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1419 * The link downed counter counts when the other side downs the
1420 * connection. We add in the number of times we downed the link
1421 * due to local link integrity errors to compensate.
1423 cntrs->link_downed_counter =
1424 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1425 cntrs->port_rcv_errors =
1426 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1427 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1428 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1429 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1430 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1431 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1432 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1433 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1434 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1435 cntrs->port_rcv_errors +=
1436 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1437 cntrs->port_rcv_errors +=
1438 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1439 cntrs->port_rcv_remphys_errors =
1440 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1441 cntrs->port_xmit_discards =
1442 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1443 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1444 QIBPORTCNTR_WORDSEND);
1445 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1446 QIBPORTCNTR_WORDRCV);
1447 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1448 QIBPORTCNTR_PKTSEND);
1449 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1450 QIBPORTCNTR_PKTRCV);
1451 cntrs->local_link_integrity_errors =
1452 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1453 cntrs->excessive_buffer_overrun_errors =
1454 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1455 cntrs->vl15_dropped =
1456 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
1474 struct qib_ibdev *dev = &dd->verbs_dev;
1475 struct list_head *list;
1476 struct qib_qp *qps[5];
1478 unsigned long flags;
1481 list = &dev->piowait;
1485 * Note: checking that the piowait list is empty and clearing
1486 * the buffer available interrupt needs to be atomic or we
1487 * could end up with QPs on the wait list with the interrupt
1490 spin_lock_irqsave(&dev->pending_lock, flags);
1491 while (!list_empty(list)) {
1492 if (n == ARRAY_SIZE(qps))
1494 qp = list_entry(list->next, struct qib_qp, iowait);
1495 list_del_init(&qp->iowait);
1496 atomic_inc(&qp->refcount);
1499 dd->f_wantpiobuf_intr(dd, 0);
1501 spin_unlock_irqrestore(&dev->pending_lock, flags);
1503 for (i = 0; i < n; i++) {
1506 spin_lock_irqsave(&qp->s_lock, flags);
1507 if (qp->s_flags & QIB_S_WAIT_PIO) {
1508 qp->s_flags &= ~QIB_S_WAIT_PIO;
1509 qib_schedule_send(qp);
1511 spin_unlock_irqrestore(&qp->s_lock, flags);
1513 /* Notify qib_destroy_qp() if it is waiting. */
1514 if (atomic_dec_and_test(&qp->refcount))
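/*
 * qib_query_device - report device attributes, mostly derived from the
 * module parameter limits above and the lkey table size.
 */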
1519 static int qib_query_device(struct ib_device *ibdev,
1520 struct ib_device_attr *props)
1522 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1523 struct qib_ibdev *dev = to_idev(ibdev);
1525 memset(props, 0, sizeof(*props));
1527 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1528 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1529 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1530 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1531 props->page_size_cap = PAGE_SIZE;
1533 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1534 props->vendor_part_id = dd->deviceid;
1535 props->hw_ver = dd->minrev;
1536 props->sys_image_guid = ib_qib_sys_image_guid;
1537 props->max_mr_size = ~0ULL;
1538 props->max_qp = ib_qib_max_qps;
1539 props->max_qp_wr = ib_qib_max_qp_wrs;
1540 props->max_sge = ib_qib_max_sges;
1541 props->max_cq = ib_qib_max_cqs;
1542 props->max_ah = ib_qib_max_ahs;
1543 props->max_cqe = ib_qib_max_cqes;
1544 props->max_mr = dev->lk_table.max;
1545 props->max_fmr = dev->lk_table.max;
1546 props->max_map_per_fmr = 32767;
1547 props->max_pd = ib_qib_max_pds;
1548 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1549 props->max_qp_init_rd_atom = 255;
1550 /* props->max_res_rd_atom */
1551 props->max_srq = ib_qib_max_srqs;
1552 props->max_srq_wr = ib_qib_max_srq_wrs;
1553 props->max_srq_sge = ib_qib_max_srq_sges;
1554 /* props->local_ca_ack_delay */
1555 props->atomic_cap = IB_ATOMIC_GLOB;
1556 props->max_pkeys = qib_get_npkeys(dd);
1557 props->max_mcast_grp = ib_qib_max_mcast_grps;
1558 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1559 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1560 props->max_mcast_grp;
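/*
 * qib_query_port - report port attributes from the per-port data:
 * LID, SM LID/SL, link state, capability flags, MTU and VL limits.
 */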
1565 static int qib_query_port(struct ib_device *ibdev, u8 port,
1566 struct ib_port_attr *props)
1568 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1569 struct qib_ibport *ibp = to_iport(ibdev, port);
1570 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1574 memset(props, 0, sizeof(*props));
1575 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1576 props->lmc = ppd->lmc;
1577 props->sm_lid = ibp->sm_lid;
1578 props->sm_sl = ibp->sm_sl;
1579 props->state = dd->f_iblink_state(ppd->lastibcstat);
1580 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1581 props->port_cap_flags = ibp->port_cap_flags;
1582 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1583 props->max_msg_sz = 0x80000000;
1584 props->pkey_tbl_len = qib_get_npkeys(dd);
1585 props->bad_pkey_cntr = ibp->pkey_violations;
1586 props->qkey_viol_cntr = ibp->qkey_violations;
1587 props->active_width = ppd->link_width_active;
1588 /* See rate_show() */
1589 props->active_speed = ppd->link_speed_active;
1590 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1591 props->init_type_reply = 0;
1593 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1594 switch (ppd->ibmtu) {
1613 props->active_mtu = mtu;
1614 props->subnet_timeout = ibp->subnet_timeout;
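/*
 * qib_modify_device - only the node description and the system image
 * GUID may be changed; either change is propagated to every port.
 */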
1619 static int qib_modify_device(struct ib_device *device,
1620 int device_modify_mask,
1621 struct ib_device_modify *device_modify)
1623 struct qib_devdata *dd = dd_from_ibdev(device);
1627 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1628 IB_DEVICE_MODIFY_NODE_DESC)) {
1633 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1634 memcpy(device->node_desc, device_modify->node_desc, 64);
1635 for (i = 0; i < dd->num_pports; i++) {
1636 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1638 qib_node_desc_chg(ibp);
1642 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1643 ib_qib_sys_image_guid =
1644 cpu_to_be64(device_modify->sys_image_guid);
1645 for (i = 0; i < dd->num_pports; i++) {
1646 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1648 qib_sys_guid_chg(ibp);
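/*
 * qib_modify_port - update the port capability flags and, on request,
 * take the link down or reset the QKEY violation counter.
 */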
1658 static int qib_modify_port(struct ib_device *ibdev, u8 port,
1659 int port_modify_mask, struct ib_port_modify *props)
1661 struct qib_ibport *ibp = to_iport(ibdev, port);
1662 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1664 ibp->port_cap_flags |= props->set_port_cap_mask;
1665 ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1666 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1667 qib_cap_mask_chg(ibp);
1668 if (port_modify_mask & IB_PORT_SHUTDOWN)
1669 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1670 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1671 ibp->qkey_violations = 0;
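/*
 * qib_query_gid - return the GID for the given port and index; index 0
 * is the port GUID, higher indices come from the ibp->guids[] table.
 */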
1675 static int qib_query_gid(struct ib_device *ibdev, u8 port,
1676 int index, union ib_gid *gid)
1678 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1681 if (!port || port > dd->num_pports)
1684 struct qib_ibport *ibp = to_iport(ibdev, port);
1685 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1687 gid->global.subnet_prefix = ibp->gid_prefix;
1689 gid->global.interface_id = ppd->guid;
1690 else if (index < QIB_GUIDS_PER_PORT)
1691 gid->global.interface_id = ibp->guids[index - 1];
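/*
 * qib_alloc_pd - allocate a protection domain, enforcing the (arbitrary)
 * ib_qib_max_pds limit under n_pds_lock.
 */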
1699 static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1700 struct ib_ucontext *context,
1701 struct ib_udata *udata)
1703 struct qib_ibdev *dev = to_idev(ibdev);
1708 * This is actually totally arbitrary. Some correctness tests
1709 * assume there's a maximum number of PDs that can be allocated.
1710 * We don't actually have this limit, but we fail the test if
1711 * we allow allocations of more than we report for this value.
1714 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1716 ret = ERR_PTR(-ENOMEM);
1720 spin_lock(&dev->n_pds_lock);
1721 if (dev->n_pds_allocated == ib_qib_max_pds) {
1722 spin_unlock(&dev->n_pds_lock);
1724 ret = ERR_PTR(-ENOMEM);
1728 dev->n_pds_allocated++;
1729 spin_unlock(&dev->n_pds_lock);
1731 /* ib_alloc_pd() will initialize pd->ibpd. */
1732 pd->user = udata != NULL;
1740 static int qib_dealloc_pd(struct ib_pd *ibpd)
1742 struct qib_pd *pd = to_ipd(ibpd);
1743 struct qib_ibdev *dev = to_idev(ibpd->device);
1745 spin_lock(&dev->n_pds_lock);
1746 dev->n_pds_allocated--;
1747 spin_unlock(&dev->n_pds_lock);
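/*
 * qib_check_ah - validate address handle attributes: a multicast DLID
 * requires a GRH, and the SGID index, port number, static rate and SL
 * must all be in range; DLID 0 is rejected.
 */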
1754 int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1756 /* A multicast address requires a GRH (see ch. 8.4.1). */
1757 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
1758 ah_attr->dlid != QIB_PERMISSIVE_LID &&
1759 !(ah_attr->ah_flags & IB_AH_GRH))
1761 if ((ah_attr->ah_flags & IB_AH_GRH) &&
1762 ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
1764 if (ah_attr->dlid == 0)
1766 if (ah_attr->port_num < 1 ||
1767 ah_attr->port_num > ibdev->phys_port_cnt)
1769 if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
1770 ib_rate_to_mult(ah_attr->static_rate) < 0)
1772 if (ah_attr->sl > 15)
/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
1791 struct qib_ibdev *dev = to_idev(pd->device);
1792 unsigned long flags;
1794 if (qib_check_ah(pd->device, ah_attr)) {
1795 ret = ERR_PTR(-EINVAL);
1799 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1801 ret = ERR_PTR(-ENOMEM);
1805 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1806 if (dev->n_ahs_allocated == ib_qib_max_ahs) {
1807 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1809 ret = ERR_PTR(-ENOMEM);
1813 dev->n_ahs_allocated++;
1814 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1816 /* ib_create_ah() will initialize ah->ibah. */
1817 ah->attr = *ah_attr;
1818 atomic_set(&ah->refcount, 0);
/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
1834 struct qib_ibdev *dev = to_idev(ibah->device);
1835 struct qib_ah *ah = to_iah(ibah);
1836 unsigned long flags;
1838 if (atomic_read(&ah->refcount) != 0)
1841 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1842 dev->n_ahs_allocated--;
1843 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1850 static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1852 struct qib_ah *ah = to_iah(ibah);
1854 if (qib_check_ah(ibah->device, ah_attr))
1857 ah->attr = *ah_attr;
1862 static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1864 struct qib_ah *ah = to_iah(ibah);
1866 *ah_attr = ah->attr;
/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}
/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
1886 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1887 struct qib_devdata *dd = ppd->dd;
1888 unsigned ctxt = ppd->hw_pidx;
1891 /* dd->rcd null if mini_init or some init failures */
1892 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1895 ret = dd->rcd[ctxt]->pkeys[index];
1900 static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1903 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1906 if (index >= qib_get_npkeys(dd)) {
1911 *pkey = qib_get_pkey(to_iport(ibdev, port), index);
/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
1927 struct qib_ucontext *context;
1928 struct ib_ucontext *ret;
1930 context = kmalloc(sizeof *context, GFP_KERNEL);
1932 ret = ERR_PTR(-ENOMEM);
1936 ret = &context->ibucontext;
1942 static int qib_dealloc_ucontext(struct ib_ucontext *context)
1944 kfree(to_iucontext(context));
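/*
 * init_ibport - initialize per-port verbs state: locks, GID prefix,
 * capability flags, PMA counter selects, and the baseline ("z_") counter
 * snapshots used to report counters relative to driver load.
 */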
1948 static void init_ibport(struct qib_pportdata *ppd)
1950 struct qib_verbs_counters cntrs;
1951 struct qib_ibport *ibp = &ppd->ibport_data;
1953 spin_lock_init(&ibp->lock);
1954 /* Set the prefix to the default value (see ch. 4.1.1) */
1955 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
1956 ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1957 ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1958 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1959 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1960 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1961 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1962 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1963 ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1964 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1965 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1966 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1967 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1968 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1970 /* Snapshot current HW counters to "clear" them. */
1971 qib_get_counters(ppd, &cntrs);
1972 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1973 ibp->z_link_error_recovery_counter =
1974 cntrs.link_error_recovery_counter;
1975 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1976 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1977 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1978 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1979 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1980 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1981 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1982 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1983 ibp->z_local_link_integrity_errors =
1984 cntrs.local_link_integrity_errors;
1985 ibp->z_excessive_buffer_overrun_errors =
1986 cntrs.excessive_buffer_overrun_errors;
1987 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1988 RCU_INIT_POINTER(ibp->qp0, NULL);
1989 RCU_INIT_POINTER(ibp->qp1, NULL);
/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated qib_ibdev pointer or NULL on error.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
1999 struct qib_ibdev *dev = &dd->verbs_dev;
2000 struct ib_device *ibdev = &dev->ibdev;
2001 struct qib_pportdata *ppd = dd->pport;
2002 unsigned i, lk_tab_size;
2005 dev->qp_table_size = ib_qib_qp_table_size;
2006 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
2007 dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
2009 if (!dev->qp_table) {
2013 for (i = 0; i < dev->qp_table_size; i++)
2014 RCU_INIT_POINTER(dev->qp_table[i], NULL);
2016 for (i = 0; i < dd->num_pports; i++)
2017 init_ibport(ppd + i);
2019 /* Only need to initialize non-zero fields. */
2020 spin_lock_init(&dev->qpt_lock);
2021 spin_lock_init(&dev->n_pds_lock);
2022 spin_lock_init(&dev->n_ahs_lock);
2023 spin_lock_init(&dev->n_cqs_lock);
2024 spin_lock_init(&dev->n_qps_lock);
2025 spin_lock_init(&dev->n_srqs_lock);
2026 spin_lock_init(&dev->n_mcast_grps_lock);
2027 init_timer(&dev->mem_timer);
2028 dev->mem_timer.function = mem_timer;
2029 dev->mem_timer.data = (unsigned long) dev;
2031 qib_init_qpn_table(dd, &dev->qpn_table);
2034 * The top ib_qib_lkey_table_size bits are used to index the
2035 * table. The lower 8 bits can be owned by the user (copied from
2036 * the LKEY). The remaining bits act as a generation number or tag.
2038 spin_lock_init(&dev->lk_table.lock);
2039 /* insure generation is at least 4 bits see keys.c */
2040 if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
2041 qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
2042 ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
2043 ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
2045 dev->lk_table.max = 1 << ib_qib_lkey_table_size;
2046 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2047 dev->lk_table.table = (struct qib_mregion **)
2048 vmalloc(lk_tab_size);
2049 if (dev->lk_table.table == NULL) {
2053 memset(dev->lk_table.table, 0, lk_tab_size);
2054 INIT_LIST_HEAD(&dev->pending_mmaps);
2055 spin_lock_init(&dev->pending_lock);
2056 dev->mmap_offset = PAGE_SIZE;
2057 spin_lock_init(&dev->mmap_offset_lock);
2058 INIT_LIST_HEAD(&dev->piowait);
2059 INIT_LIST_HEAD(&dev->dmawait);
2060 INIT_LIST_HEAD(&dev->txwait);
2061 INIT_LIST_HEAD(&dev->memwait);
2062 INIT_LIST_HEAD(&dev->txreq_free);
2064 if (ppd->sdma_descq_cnt) {
2065 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
2066 ppd->sdma_descq_cnt *
2067 sizeof(struct qib_pio_header),
2068 &dev->pio_hdrs_phys,
2070 if (!dev->pio_hdrs) {
2076 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2077 struct qib_verbs_txreq *tx;
2079 tx = kzalloc(sizeof *tx, GFP_KERNEL);
2085 list_add(&tx->txreq.list, &dev->txreq_free);
2089 * The system image GUID is supposed to be the same for all
2090 * IB HCAs in a single system but since there can be other
2091 * device types in the system, we can't be sure this is unique.
2093 if (!ib_qib_sys_image_guid)
2094 ib_qib_sys_image_guid = ppd->guid;
2096 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2097 ibdev->owner = THIS_MODULE;
2098 ibdev->node_guid = ppd->guid;
2099 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2100 ibdev->uverbs_cmd_mask =
2101 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2102 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2103 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2104 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2105 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2106 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2107 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
2108 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2109 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2110 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2111 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2112 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2113 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2114 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2115 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2116 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2117 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2118 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2119 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2120 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2121 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2122 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2123 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2124 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2125 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2126 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2127 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2128 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2129 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2130 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2131 ibdev->node_type = RDMA_NODE_IB_CA;
2132 ibdev->phys_port_cnt = dd->num_pports;
2133 ibdev->num_comp_vectors = 1;
2134 ibdev->dma_device = &dd->pcidev->dev;
2135 ibdev->query_device = qib_query_device;
2136 ibdev->modify_device = qib_modify_device;
2137 ibdev->query_port = qib_query_port;
2138 ibdev->modify_port = qib_modify_port;
2139 ibdev->query_pkey = qib_query_pkey;
2140 ibdev->query_gid = qib_query_gid;
2141 ibdev->alloc_ucontext = qib_alloc_ucontext;
2142 ibdev->dealloc_ucontext = qib_dealloc_ucontext;
2143 ibdev->alloc_pd = qib_alloc_pd;
2144 ibdev->dealloc_pd = qib_dealloc_pd;
2145 ibdev->create_ah = qib_create_ah;
2146 ibdev->destroy_ah = qib_destroy_ah;
2147 ibdev->modify_ah = qib_modify_ah;
2148 ibdev->query_ah = qib_query_ah;
2149 ibdev->create_srq = qib_create_srq;
2150 ibdev->modify_srq = qib_modify_srq;
2151 ibdev->query_srq = qib_query_srq;
2152 ibdev->destroy_srq = qib_destroy_srq;
2153 ibdev->create_qp = qib_create_qp;
2154 ibdev->modify_qp = qib_modify_qp;
2155 ibdev->query_qp = qib_query_qp;
2156 ibdev->destroy_qp = qib_destroy_qp;
2157 ibdev->post_send = qib_post_send;
2158 ibdev->post_recv = qib_post_receive;
2159 ibdev->post_srq_recv = qib_post_srq_receive;
2160 ibdev->create_cq = qib_create_cq;
2161 ibdev->destroy_cq = qib_destroy_cq;
2162 ibdev->resize_cq = qib_resize_cq;
2163 ibdev->poll_cq = qib_poll_cq;
2164 ibdev->req_notify_cq = qib_req_notify_cq;
2165 ibdev->get_dma_mr = qib_get_dma_mr;
2166 ibdev->reg_phys_mr = qib_reg_phys_mr;
2167 ibdev->reg_user_mr = qib_reg_user_mr;
2168 ibdev->dereg_mr = qib_dereg_mr;
2169 ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
2170 ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
2171 ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
2172 ibdev->alloc_fmr = qib_alloc_fmr;
2173 ibdev->map_phys_fmr = qib_map_phys_fmr;
2174 ibdev->unmap_fmr = qib_unmap_fmr;
2175 ibdev->dealloc_fmr = qib_dealloc_fmr;
2176 ibdev->attach_mcast = qib_multicast_attach;
2177 ibdev->detach_mcast = qib_multicast_detach;
2178 ibdev->process_mad = qib_process_mad;
2179 ibdev->mmap = qib_mmap;
2180 ibdev->dma_ops = &qib_dma_mapping_ops;
2182 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2183 QIB_IDSTR " %s", init_utsname()->nodename);
2185 ret = ib_register_device(ibdev, qib_create_port_files);
2189 ret = qib_create_agents(dev);
2193 if (qib_verbs_register_sysfs(dd))
2199 qib_free_agents(dev);
2201 ib_unregister_device(ibdev);
2204 while (!list_empty(&dev->txreq_free)) {
2205 struct list_head *l = dev->txreq_free.next;
2206 struct qib_verbs_txreq *tx;
2209 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2212 if (ppd->sdma_descq_cnt)
2213 dma_free_coherent(&dd->pcidev->dev,
2214 ppd->sdma_descq_cnt *
2215 sizeof(struct qib_pio_header),
2216 dev->pio_hdrs, dev->pio_hdrs_phys);
2218 vfree(dev->lk_table.table);
2220 kfree(dev->qp_table);
2222 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
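/**
 * qib_unregister_ib_device - unregister the verbs device and free its resources
 * @dd: the device data structure
 */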
2227 void qib_unregister_ib_device(struct qib_devdata *dd)
2229 struct qib_ibdev *dev = &dd->verbs_dev;
2230 struct ib_device *ibdev = &dev->ibdev;
2232 unsigned lk_tab_size;
2234 qib_verbs_unregister_sysfs(dd);
2236 qib_free_agents(dev);
2238 ib_unregister_device(ibdev);
2240 if (!list_empty(&dev->piowait))
2241 qib_dev_err(dd, "piowait list not empty!\n");
2242 if (!list_empty(&dev->dmawait))
2243 qib_dev_err(dd, "dmawait list not empty!\n");
2244 if (!list_empty(&dev->txwait))
2245 qib_dev_err(dd, "txwait list not empty!\n");
2246 if (!list_empty(&dev->memwait))
2247 qib_dev_err(dd, "memwait list not empty!\n");
2249 qib_dev_err(dd, "DMA MR not NULL!\n");
2251 qps_inuse = qib_free_all_qps(dd);
2253 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2256 del_timer_sync(&dev->mem_timer);
2257 qib_free_qpn_table(&dev->qpn_table);
2258 while (!list_empty(&dev->txreq_free)) {
2259 struct list_head *l = dev->txreq_free.next;
2260 struct qib_verbs_txreq *tx;
2263 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2266 if (dd->pport->sdma_descq_cnt)
2267 dma_free_coherent(&dd->pcidev->dev,
2268 dd->pport->sdma_descq_cnt *
2269 sizeof(struct qib_pio_header),
2270 dev->pio_hdrs, dev->pio_hdrs_phys);
2271 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2272 vfree(dev->lk_table.table);
2273 kfree(dev->qp_table);