/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm-powerpc/system.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
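/*
 * ehca_write_rwqe() - build one receive WQE in the hardware receive queue.
 *
 * Translates a generic ib_recv_wr into the eHCA WQE layout: validates the
 * SG list length against the queue's capability, clears the WQE header,
 * then copies wr_id and every scatter/gather entry into place.
 */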
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr)
{
	u8 cnt_ds;

	if (unlikely((recv_wr->num_sge < 0) ||
		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = recv_wr->wr_id;
	wqe_p->nr_of_data_seg = recv_wr->num_sge;

	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
			     ipz_rqueue);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
	}

	return 0;
}
#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>
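/*
 * trace_send_wr_ud() - debug-only dump of a chain of UD send WRs.
 * For each WR this logs the MAD header (if present) and hex-dumps every
 * SG entry. Compiled in only when DEBUG_GSI_SEND_WR is defined.
 */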
static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
	int idx = 0;
	int j;

	while (send_wr) {
		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
		struct ib_sge *sge = send_wr->sg_list;
		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
			     send_wr->num_sge, send_wr->send_flags,
			     send_wr->opcode);
		if (mad_hdr) {
			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
				     "mgmt_class=%x class_version=%x method=%x "
				     "status=%x class_specific=%x tid=%lx "
				     "attr_id=%x resv=%x attr_mod=%x",
				     idx, mad_hdr->base_version,
				     mad_hdr->mgmt_class,
				     mad_hdr->class_version, mad_hdr->method,
				     mad_hdr->status, mad_hdr->class_specific,
				     mad_hdr->tid, mad_hdr->attr_id,
				     mad_hdr->resv, mad_hdr->attr_mod);
		}
		for (j = 0; j < send_wr->num_sge; j++) {
			u8 *data = (u8 *)abs_to_virt(sge->addr);
			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
				     "lkey=%x",
				     idx, j, data, sge->length, sge->lkey);
			/* assume length is n*16 */
			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
				 idx, j);
			sge++;
		} /* eof for j */
		idx++;
		send_wr = send_wr->next;
	} /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */
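/*
 * ehca_write_swqe() - build one send WQE in the hardware send queue.
 *
 * Maps the verbs opcode onto the eHCA optype, sets completion/fence flags,
 * copies immediate data and the SG list, and fills in the transport
 * specific parts (address vector and qkey for UD, remote address and rkey
 * for UC/RC). "hidden" marks WQEs the driver posts on its own behalf, such
 * as the empty RDMA READ used for ACK circumvention; those never request
 * a signaled completion.
 */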
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr,
				  int hidden)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = send_wr->wr_id;
	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
	     qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
	    && !hidden)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;
	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (unlikely(!send_wr->wr.ud.ah)) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof for idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
		/* unsolicited ack circumvention */
		if (send_wr->opcode == IB_WR_RDMA_READ) {
			/* on RDMA read, switch on and reset counters */
			qp->message_count = qp->packet_count = 0;
			qp->unsol_ack_circ = 1;
		} else
			/* else estimate #packets */
			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;

		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
	}

	return 0;
}
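/*
 * In the cqe_status word, WC_STATUS_ERROR_BIT marks an error completion;
 * the low six bits then select the error class, and for class 0x07
 * (remote error) the remote cause is taken from the bits covered by
 * WC_STATUS_REMOTE_ERROR_FLAGS. The concrete bit layout is defined by the
 * eHCA hardware; the mapping below mirrors that encoding.
 */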
/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01: case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02: case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03: case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04: case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05: case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A: case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B: case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C: case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			*wc_status = IB_WC_FATAL_ERR;
		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
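/*
 * post_one_send() - write a single send WR into the send queue.
 * On queue overflow, or if the WR cannot be translated into a WQE, the
 * queue's free-entry pointer is rolled back and *bad_send_wr (if supplied)
 * points at the failing WR.
 */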
static inline int post_one_send(struct ehca_qp *my_qp,
				struct ib_send_wr *cur_send_wr,
				struct ib_send_wr **bad_send_wr,
				int hidden)
{
	struct ehca_wqe *wqe_p;
	int ret;
	u64 start_offset = my_qp->ipz_squeue.current_q_offset;

	/* get pointer next to free WQE */
	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
	if (unlikely(!wqe_p)) {
		/* too many posted work requests: queue overflow */
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -ENOMEM;
	}
	/* write a SEND WQE into the QUEUE */
	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
	/*
	 * if something failed,
	 * reset the free entry pointer to the start value
	 */
	if (unlikely(ret)) {
		my_qp->ipz_squeue.current_q_offset = start_offset;
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}
int ehca_post_send(struct ib_qp *qp,
		   struct ib_send_wr *send_wr,
		   struct ib_send_wr **bad_send_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_send_wr *cur_send_wr;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_s, flags);

	/* Send an empty extra RDMA read if:
	 *  1) there has been an RDMA read on this connection before
	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
	 *  3) we can be sure that any previous extra RDMA read has been
	 *     processed so we don't overflow the SQ
	 */
	if (unlikely(my_qp->unsol_ack_circ &&
		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
		/* insert an empty RDMA READ to fix up the remote QP state */
		struct ib_send_wr circ_wr;
		memset(&circ_wr, 0, sizeof(circ_wr));
		circ_wr.opcode = IB_WR_RDMA_READ;
		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
		wqe_cnt++;
		ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
		my_qp->message_count = my_qp->packet_count = 0;
	}

	/* loop processes list of send reqs */
	for (cur_send_wr = send_wr; cur_send_wr != NULL;
	     cur_send_wr = cur_send_wr->next) {
		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
		if (unlikely(ret)) {
			/* if one or more WQEs were successful, don't fail */
			if (wqe_cnt)
				ret = 0;
			goto post_send_exit0;
		}
		wqe_cnt++;
		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
			 my_qp, qp->qp_num, wqe_cnt);
	} /* eof for cur_send_wr */

post_send_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_sqa(my_qp, wqe_cnt);
	my_qp->message_count += wqe_cnt;
	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
	return ret;
}
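/*
 * internal_post_recv() - common receive-posting path for QPs and SRQs.
 * Checks that the QP actually owns a receive queue (it may not, e.g. when
 * an SRQ is attached), then posts each WR under spinlock_r and notifies
 * the adapter once at the end via hipz_update_rqa().
 */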
static int internal_post_recv(struct ehca_qp *my_qp,
			      struct ib_device *dev,
			      struct ib_recv_wr *recv_wr,
			      struct ib_recv_wr **bad_recv_wr)
{
	struct ib_recv_wr *cur_recv_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	if (unlikely(!HAS_RQ(my_qp))) {
		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
		return -ENODEV;
	}

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_r, flags);

	/* loop processes list of recv reqs */
	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
	     cur_recv_wr = cur_recv_wr->next) {
		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(dev, "Too many posted WQEs "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		/* write a RECV WQE into the QUEUE */
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_rqueue.current_q_offset = start_offset;
			*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(dev, "Could not write WQE "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		wqe_cnt++;
		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
			 my_qp, my_qp->real_qp_num, wqe_cnt);
	} /* eof for cur_recv_wr */

post_recv_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_rqa(my_qp, wqe_cnt);
	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
	return ret;
}
int ehca_post_recv(struct ib_qp *qp,
		   struct ib_recv_wr *recv_wr,
		   struct ib_recv_wr **bad_recv_wr)
{
	return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
				  qp->device, recv_wr, bad_recv_wr);
}

int ehca_post_srq_recv(struct ib_srq *srq,
		       struct ib_recv_wr *recv_wr,
		       struct ib_recv_wr **bad_recv_wr)
{
	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
				  srq->device, recv_wr, bad_recv_wr);
}
/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented!!!
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};
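/*
 * The table is sparse because the hardware reports the CQE optype as a
 * one-hot bit (presumably so several type bits can be tested at once);
 * every index that is not a power of two maps to zero, i.e. invalid.
 */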
/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	struct ehca_qp *my_qp;
	int cqe_count = 0;

poll_cq_one_read_cqe:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
			 "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
		goto poll_cq_one_exit0;
	}
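	/*
	 * The rmb() below makes sure the CQE contents are not read before
	 * its valid bit has been seen. After that, CQEs marked as purged
	 * (flushed after a send queue error) are filtered out: the bad WQE
	 * already produced an error CQE, so the purged duplicate is dropped
	 * and sqerr_purgeflag is cleared again.
	 */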
	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp;
		int purgeflag;
		unsigned long flags;

		qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto poll_cq_one_read_cqe;
		}
		spin_lock_irqsave(&qp->spinlock_s, flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, flags);

		if (purgeflag) {
			ehca_dbg(cq->device,
				 "Got CQE with purged bit qp_num=%x src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * ignore this to avoid double cqes of bad wqe
			 * that caused sqe and turn off purge flag
			 */
			qp->sqerr_purgeflag = 0;
			goto poll_cq_one_read_cqe;
		}
	}
	/* tracing cqe */
	if (unlikely(ehca_debug_level)) {
		ehca_dbg(cq->device,
			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
			 my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}
	/* we got a completion! */
	wc->wr_id = cqe->work_request_id;

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype]-1;
	if (unlikely(wc->opcode == -1)) {
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* the queue pointer has been advanced anyway, throwing
		 * away this entry */
		goto poll_cq_one_exit0;
	}
	/* eval ib_wc_status */
	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;
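	/*
	 * Look up the owning QP via the CQE's qp_token under the idr read
	 * lock. A valid CQE is assumed to carry a token of a live QP; a
	 * stale token would make idr_find() return NULL here.
	 */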
	read_lock(&ehca_qp_idr_lock);
	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
	wc->qp = &my_qp->ib_qp;
	read_unlock(&ehca_qp_idr_lock);
	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
			 cqe->status, cqe->local_qp_number,
			 cqe->remote_qp_number, cqe->work_request_id, cqe);
poll_cq_one_exit0:
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int nr;
	struct ib_wc *current_wc = wc;
	int ret = 0;
	unsigned long flags;

	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		ret = -EINVAL;
		goto poll_cq_exit0;
	}

	spin_lock_irqsave(&my_cq->spinlock, flags);
	for (nr = 0; nr < num_entries; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
		if (ret)
			break;
		current_wc++;
	} /* eof for nr */
	spin_unlock_irqrestore(&my_cq->spinlock, flags);
	if (ret == -EAGAIN || !ret)
		ret = nr;

poll_cq_exit0:
	return ret;
}
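/*
 * ehca_req_notify_cq() - arm the CQ for the next completion event.
 * IB_CQ_SOLICITED arms for solicited completions only (CQx_N0),
 * IB_CQ_NEXT_COMP for any completion (CQx_N1). With
 * IB_CQ_REPORT_MISSED_EVENTS the return value is nonzero if the CQ still
 * holds valid, unpolled CQEs, telling the caller to poll again.
 */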
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int ret = 0;

	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
	case IB_CQ_NEXT_COMP:
		hipz_set_cqx_n1(my_cq, 1);
		break;
	default:
		return -EINVAL;
	}

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		unsigned long spl_flags;
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}