/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  QP functions
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
static struct kmem_cache *qp_cache;
/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS          | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)
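/*
 * Example (illustrative only): ehca_query_qp() below rejects any query
 * whose mask contains one of these bits, e.g. a caller passing
 * qp_attr_mask = IB_QP_ACCESS_FLAGS gets -EINVAL back instead of a
 * firmware query.
 */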
/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};
/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};
/*
 * ib2ehca_qp_state maps IB to ehca qp_state:
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca2ib_qp_state maps ehca to IB qp_state:
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};
/*
 * ib2ehcaqptype maps IB to ehca qp_type:
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;

	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}

	return index;
}
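/*
 * Example (illustrative): get_modqp_statetrans(IB_QPS_RESET, IB_QPS_INIT)
 * yields IB_QPST_RESET2INIT, while an unsupported transition such as
 * get_modqp_statetrans(IB_QPS_RTR, IB_QPS_INIT) matches no case and
 * returns -EINVAL, which internal_modify_qp() rejects.
 */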
/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}
/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}
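/*
 * Note on expected_hret (illustrative): for a QP that owns both queues,
 * internal_create_qp() below registers the send queue first and passes
 * expected_hret = H_PAGE_REGISTERED, because the firmware still waits
 * for the receive queue pages; only the very last page of the last
 * queue is expected to complete with H_SUCCESS.
 */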
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}
static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}
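/*
 * Worked example (illustrative): the loop above visits act_nr_sge = 4,
 * 12, 28, 60, 124, 252, so a request for e.g. req_nr_sge = 10 is
 * rounded up to 12.  Assuming a 64-byte WQE header and 16-byte SGE
 * entries in struct ehca_wqe, that yields WQE sizes of 128, 256, 512,
 * 1024, 2048 and 4096 bytes -- all powers of two.  With 12 SGEs
 * (256-byte WQEs) and max_wr = 3, q_size = 256 * 4 = 1024, which still
 * fits a small queue (page_size = 3, i.e. 128 << 3 = 1024 bytes).
 */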
/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	/* do not add the node twice */
	if (list_empty(node))
		list_add_tail(node, list);
}
static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}
static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = qmap->entries - 1;
	qmap->left_to_poll = 0;
	qmap->next_wqe_idx = 0;
	for (i = 0; i < qmap->entries; i++) {
		qmap->map[i].reported = 1;
		qmap->map[i].cqe_req = 0;
	}
}
/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0, is_user = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}
	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ pd=%p max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}
	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			      && init_attr->cap.max_send_sge >= 1
			      && init_attr->cap.max_recv_sge <= 5
			      && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}
	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	if (pd->uobject && udata) {
		is_user = 1;
		context = pd->uobject->context;
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		write_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	if (my_qp->token > 0x1FFFFFF) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid number of qp");
		goto create_qp_exit1;
	}
	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}
	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}
		break;

	default:
		break;
	}
	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		if (!is_user) {
			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
				my_qp->ipz_squeue.qe_size;
			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->sq_map.map) {
				ehca_err(pd->device, "Couldn't allocate squeue "
					 "map ret=%i", ret);
				ret = -ENOMEM;
				goto create_qp_exit3;
			}
			INIT_LIST_HEAD(&my_qp->sq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->sq_map);
		}
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}
		if (!is_user) {
			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
				my_qp->ipz_rqueue.qe_size;
			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->rq_map.map) {
				ehca_err(pd->device, "Couldn't allocate rqueue "
					 "map ret=%i", ret);
				ret = -ENOMEM;
				goto create_qp_exit5;
			}
			INIT_LIST_HEAD(&my_qp->rq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->rq_map);
		}
	} else if (init_attr->srq && !is_user) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}
	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;
	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				ret = -ENOMEM;
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			kfree(my_qp->mod_qp_parm);
			my_qp->mod_qp_parm = NULL;
			/* the QP pointer is no longer valid */
			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
				NULL;
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}
	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;
create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp) && !is_user)
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp) && !is_user)
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
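/*
 * Usage sketch (illustrative only; cq and pd are hypothetical objects
 * obtained from the verbs core): a kernel consumer reaches this entry
 * point through ib_create_qp().  A low-latency QP is requested by
 * setting bit 0x80 in qp_type, which internal_create_qp() strips above.
 *
 *	struct ib_qp_init_attr ia = {
 *		.qp_type     = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap = { .max_send_wr  = 16, .max_recv_wr  = 16,
 *			 .max_send_sge = 1,  .max_recv_sge = 1 },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &ia);
 */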
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}
/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;
	unsigned int tail_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = abs_to_virt(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	tail_idx = next_index(qmap->tail, qmap->entries);
	wqe_idx = q_ofs / ipz_queue->qe_size;

	/* check all processed wqes, whether a cqe is requested or not */
	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
		tail_idx = next_index(tail_idx, qmap->entries);
	}
	/* save index in queue, where we have to start flushing */
	qmap->next_wqe_idx = wqe_idx;
	return 0;
}
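/*
 * Worked example (illustrative): with qmap->entries = 8, qmap->tail = 1
 * and the firmware reporting a wqe_p whose offset maps to wqe_idx = 5,
 * the loop above inspects map entries 2, 3 and 4; each one that has
 * cqe_req set contributes a completion the consumer is still entitled
 * to poll, so left_to_poll is incremented accordingly, and flushing
 * later starts at next_wqe_idx = 5.
 */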
static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				&my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				&my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
							my_qp->sq_map.entries);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
							my_qp->rq_map.entries);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					       flags);
		}
	}

	return 0;
}
/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int is_user = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;
	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}
	if (ibqp->uobject)
		is_user = 1;

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}
	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);
	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}
	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}
	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable connection
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}
	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->alt_pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}
	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}
	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}
	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}
	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
	    && !is_user) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}
	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR && !is_user) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		if (!is_user)
			reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp) && !is_user)
			reset_queue_map(&my_qp->rq_map);
	}
	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, i.e. we got an event PORT_ACTIVE, we'll replay the
	 * cached modify calls sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires port one to be
	 *    connected and module option nr_ports=1 to be given by the
	 *    user, which is very inconvenient for the end user.
	 * 2) Firmware accepts modify_qp() only if the respective port has
	 *    become active. Older code had a wait loop of 30sec in
	 *    create_qp()/define_aqp1(), which is not appropriate in practice.
	 *    This code removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad resp. users. Only activated ports
	 *    will then be usable for the users.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}

			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}
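/*
 * Illustrative flow (no additional driver code): while a port is still
 * coming up, ib_mad issues e.g. modify_qp(RESET->INIT) followed by
 * modify_qp(INIT->RTR) on the GSI QP.  As long as my_qp->mod_qp_parm is
 * non-NULL, each (attr_mask, attr) pair is only recorded above; once
 * the PORT_ACTIVE event arrives, ehca_recover_sqp() below replays the
 * recorded sequence in order through internal_modify_qp().
 */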
void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() from caching qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
	/* UD_AV CIRCUMVENTION */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index = qpcb->prim_p_key_idx;
	qp_attr->port_num = qpcb->prim_phys_port;
	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag) {
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al) {
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	update_mask = 0;
	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit = attr->srq_limit;
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = qpcb->curr_srq_limit;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	int is_user = 0;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		is_user = 1;
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp) && !is_user)
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;
		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
		if (!is_user)
			vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
		if (!is_user)
			vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}
int ehca_destroy_qp(struct ib_qp *qp)
{
	return internal_destroy_qp(qp->device,
				   container_of(qp, struct ehca_qp, ib_qp),
				   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
	return internal_destroy_qp(srq->device,
				   container_of(srq, struct ehca_qp, ib_srq),
				   srq->uobject);
}
int ehca_init_qp_cache(void)
{
	qp_cache = kmem_cache_create("ehca_cache_qp",
				     sizeof(struct ehca_qp), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!qp_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_qp_cache(void)
{
	if (qp_cache)
		kmem_cache_destroy(qp_cache);
}