drivers/infiniband/hw/mthca/mthca_qp.c
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Cisco Systems. All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/string.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39
40 #include <asm/io.h>
41
42 #include <rdma/ib_verbs.h>
43 #include <rdma/ib_cache.h>
44 #include <rdma/ib_pack.h>
45
46 #include "mthca_dev.h"
47 #include "mthca_cmd.h"
48 #include "mthca_memfree.h"
49 #include "mthca_wqe.h"
50
51 enum {
52         MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
53         MTHCA_ACK_REQ_FREQ       = 10,
54         MTHCA_FLIGHT_LIMIT       = 9,
55         MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
56         MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
57         MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
58 };
59
60 enum {
61         MTHCA_QP_STATE_RST  = 0,
62         MTHCA_QP_STATE_INIT = 1,
63         MTHCA_QP_STATE_RTR  = 2,
64         MTHCA_QP_STATE_RTS  = 3,
65         MTHCA_QP_STATE_SQE  = 4,
66         MTHCA_QP_STATE_SQD  = 5,
67         MTHCA_QP_STATE_ERR  = 6,
68         MTHCA_QP_STATE_DRAINING = 7
69 };
70
71 enum {
72         MTHCA_QP_ST_RC  = 0x0,
73         MTHCA_QP_ST_UC  = 0x1,
74         MTHCA_QP_ST_RD  = 0x2,
75         MTHCA_QP_ST_UD  = 0x3,
76         MTHCA_QP_ST_MLX = 0x7
77 };
78
79 enum {
80         MTHCA_QP_PM_MIGRATED = 0x3,
81         MTHCA_QP_PM_ARMED    = 0x0,
82         MTHCA_QP_PM_REARM    = 0x1
83 };
84
85 enum {
86         /* qp_context flags */
87         MTHCA_QP_BIT_DE  = 1 <<  8,
88         /* params1 */
89         MTHCA_QP_BIT_SRE = 1 << 15,
90         MTHCA_QP_BIT_SWE = 1 << 14,
91         MTHCA_QP_BIT_SAE = 1 << 13,
92         MTHCA_QP_BIT_SIC = 1 <<  4,
93         MTHCA_QP_BIT_SSC = 1 <<  3,
94         /* params2 */
95         MTHCA_QP_BIT_RRE = 1 << 15,
96         MTHCA_QP_BIT_RWE = 1 << 14,
97         MTHCA_QP_BIT_RAE = 1 << 13,
98         MTHCA_QP_BIT_RIC = 1 <<  4,
99         MTHCA_QP_BIT_RSC = 1 <<  3
100 };
101
102 enum {
103         MTHCA_SEND_DOORBELL_FENCE = 1 << 5
104 };
105
106 struct mthca_qp_path {
107         __be32 port_pkey;
108         u8     rnr_retry;
109         u8     g_mylmc;
110         __be16 rlid;
111         u8     ackto;
112         u8     mgid_index;
113         u8     static_rate;
114         u8     hop_limit;
115         __be32 sl_tclass_flowlabel;
116         u8     rgid[16];
117 } __attribute__((packed));
118
119 struct mthca_qp_context {
120         __be32 flags;
121         __be32 tavor_sched_queue; /* Reserved on Arbel */
122         u8     mtu_msgmax;
123         u8     rq_size_stride;  /* Reserved on Tavor */
124         u8     sq_size_stride;  /* Reserved on Tavor */
125         u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
126         __be32 usr_page;
127         __be32 local_qpn;
128         __be32 remote_qpn;
129         u32    reserved1[2];
130         struct mthca_qp_path pri_path;
131         struct mthca_qp_path alt_path;
132         __be32 rdd;
133         __be32 pd;
134         __be32 wqe_base;
135         __be32 wqe_lkey;
136         __be32 params1;
137         __be32 reserved2;
138         __be32 next_send_psn;
139         __be32 cqn_snd;
140         __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
141         __be32 snd_db_index;    /* (debugging only entries) */
142         __be32 last_acked_psn;
143         __be32 ssn;
144         __be32 params2;
145         __be32 rnr_nextrecvpsn;
146         __be32 ra_buff_indx;
147         __be32 cqn_rcv;
148         __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
149         __be32 rcv_db_index;    /* (debugging only entries) */
150         __be32 qkey;
151         __be32 srqn;
152         __be32 rmsn;
153         __be16 rq_wqe_counter;  /* reserved on Tavor */
154         __be16 sq_wqe_counter;  /* reserved on Tavor */
155         u32    reserved3[18];
156 } __attribute__((packed));
157
158 struct mthca_qp_param {
159         __be32 opt_param_mask;
160         u32    reserved1;
161         struct mthca_qp_context context;
162         u32    reserved2[62];
163 } __attribute__((packed));
164
165 enum {
166         MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
167         MTHCA_QP_OPTPAR_RRE               = 1 << 1,
168         MTHCA_QP_OPTPAR_RAE               = 1 << 2,
169         MTHCA_QP_OPTPAR_RWE               = 1 << 3,
170         MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
171         MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
172         MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
173         MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
174         MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
175         MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
176         MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
177         MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
178         MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
179         MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
180         MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
181         MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
182         MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
183 };
184
185 static const u8 mthca_opcode[] = {
186         [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
187         [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
188         [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
189         [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
190         [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
191         [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
192         [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
193 };
194
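/*
 * Special QPs live in a fixed block of QPNs starting at
 * qp_table.sqp_start: with two ports, the first two entries are the
 * QP0s and the next two are the QP1s (see mthca_alloc_sqp()).
 */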
195 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
196 {
197         return qp->qpn >= dev->qp_table.sqp_start &&
198                 qp->qpn <= dev->qp_table.sqp_start + 3;
199 }
200
201 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
202 {
203         return qp->qpn >= dev->qp_table.sqp_start &&
204                 qp->qpn <= dev->qp_table.sqp_start + 1;
205 }
206
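/*
 * Look up a WQE by index.  For a queue small enough to use a single
 * direct buffer we index into it directly; otherwise we pick the page
 * from the page list and then the offset within that page.
 */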
207 static void *get_recv_wqe(struct mthca_qp *qp, int n)
208 {
209         if (qp->is_direct)
210                 return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
211         else
212                 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
213                         ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
214 }
215
216 static void *get_send_wqe(struct mthca_qp *qp, int n)
217 {
218         if (qp->is_direct)
219                 return qp->queue.direct.buf + qp->send_wqe_offset +
220                         (n << qp->sq.wqe_shift);
221         else
222                 return qp->queue.page_list[(qp->send_wqe_offset +
223                                             (n << qp->sq.wqe_shift)) >>
224                                            PAGE_SHIFT].buf +
225                         ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
226                          (PAGE_SIZE - 1));
227 }
228
229 static void mthca_wq_reset(struct mthca_wq *wq)
230 {
231         wq->next_ind  = 0;
232         wq->last_comp = wq->max - 1;
233         wq->head      = 0;
234         wq->tail      = 0;
235 }
236
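/*
 * Dispatch an asynchronous event for a QP.  The refcount is bumped
 * under qp_table.lock so the QP can't go away while the consumer's
 * event handler runs; mthca_free_qp() waits for it to drop to zero.
 */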
237 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
238                     enum ib_event_type event_type)
239 {
240         struct mthca_qp *qp;
241         struct ib_event event;
242
243         spin_lock(&dev->qp_table.lock);
244         qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
245         if (qp)
246                 ++qp->refcount;
247         spin_unlock(&dev->qp_table.lock);
248
249         if (!qp) {
250                 mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
251                 return;
252         }
253
254         if (event_type == IB_EVENT_PATH_MIG)
255                 qp->port = qp->alt_port;
256
257         event.device      = &dev->ib_dev;
258         event.event       = event_type;
259         event.element.qp  = &qp->ibqp;
260         if (qp->ibqp.event_handler)
261                 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
262
263         spin_lock(&dev->qp_table.lock);
264         if (!--qp->refcount)
265                 wake_up(&qp->wait);
266         spin_unlock(&dev->qp_table.lock);
267 }
268
269 static int to_mthca_state(enum ib_qp_state ib_state)
270 {
271         switch (ib_state) {
272         case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
273         case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
274         case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
275         case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
276         case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
277         case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
278         case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
279         default:                return -1;
280         }
281 }
282
283 enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
284
285 static int to_mthca_st(int transport)
286 {
287         switch (transport) {
288         case RC:  return MTHCA_QP_ST_RC;
289         case UC:  return MTHCA_QP_ST_UC;
290         case UD:  return MTHCA_QP_ST_UD;
291         case RD:  return MTHCA_QP_ST_RD;
292         case MLX: return MTHCA_QP_ST_MLX;
293         default:  return -1;
294         }
295 }
296
297 static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
298                         int attr_mask)
299 {
300         if (attr_mask & IB_QP_PKEY_INDEX)
301                 sqp->pkey_index = attr->pkey_index;
302         if (attr_mask & IB_QP_QKEY)
303                 sqp->qkey = attr->qkey;
304         if (attr_mask & IB_QP_SQ_PSN)
305                 sqp->send_psn = attr->sq_psn;
306 }
307
308 static void init_port(struct mthca_dev *dev, int port)
309 {
310         int err;
311         struct mthca_init_ib_param param;
312
313         memset(&param, 0, sizeof param);
314
315         param.port_width = dev->limits.port_width_cap;
316         param.vl_cap     = dev->limits.vl_cap;
317         param.mtu_cap    = dev->limits.mtu_cap;
318         param.gid_cap    = dev->limits.gid_table_len;
319         param.pkey_cap   = dev->limits.pkey_table_len;
320
321         err = mthca_INIT_IB(dev, &param, port);
322         if (err)
323                 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
324 }
325
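/*
 * Build the hardware RRE/RAE/RWE bits for params2 from the requested
 * access flags, falling back to the QP's current values for anything
 * not being modified.  With a responder depth of zero, only remote
 * writes may stay enabled.
 */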
326 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
327                                   int attr_mask)
328 {
329         u8 dest_rd_atomic;
330         u32 access_flags;
331         u32 hw_access_flags = 0;
332
333         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
334                 dest_rd_atomic = attr->max_dest_rd_atomic;
335         else
336                 dest_rd_atomic = qp->resp_depth;
337
338         if (attr_mask & IB_QP_ACCESS_FLAGS)
339                 access_flags = attr->qp_access_flags;
340         else
341                 access_flags = qp->atomic_rd_en;
342
343         if (!dest_rd_atomic)
344                 access_flags &= IB_ACCESS_REMOTE_WRITE;
345
346         if (access_flags & IB_ACCESS_REMOTE_READ)
347                 hw_access_flags |= MTHCA_QP_BIT_RRE;
348         if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
349                 hw_access_flags |= MTHCA_QP_BIT_RAE;
350         if (access_flags & IB_ACCESS_REMOTE_WRITE)
351                 hw_access_flags |= MTHCA_QP_BIT_RWE;
352
353         return cpu_to_be32(hw_access_flags);
354 }
355
356 static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
357 {
358         switch (mthca_state) {
359         case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
360         case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
361         case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
362         case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
363         case MTHCA_QP_STATE_DRAINING:
364         case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
365         case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
366         case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
367         default:                      return -1;
368         }
369 }
370
371 static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
372 {
373         switch (mthca_mig_state) {
374         case 0:  return IB_MIG_ARMED;
375         case 1:  return IB_MIG_REARM;
376         case 3:  return IB_MIG_MIGRATED;
377         default: return -1;
378         }
379 }
380
381 static int to_ib_qp_access_flags(int mthca_flags)
382 {
383         int ib_flags = 0;
384
385         if (mthca_flags & MTHCA_QP_BIT_RRE)
386                 ib_flags |= IB_ACCESS_REMOTE_READ;
387         if (mthca_flags & MTHCA_QP_BIT_RWE)
388                 ib_flags |= IB_ACCESS_REMOTE_WRITE;
389         if (mthca_flags & MTHCA_QP_BIT_RAE)
390                 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
391
392         return ib_flags;
393 }
394
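/* Translate a hardware address path back into an ib_ah_attr. */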
395 static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
396                                 struct mthca_qp_path *path)
397 {
398         memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
399         ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
400
401         if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
402                 return;
403
404         ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
405         ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
406         ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
407         ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
408                                                      path->static_rate & 0xf,
409                                                      ib_ah_attr->port_num);
410         ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
411         if (ib_ah_attr->ah_flags) {
412                 ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
413                 ib_ah_attr->grh.hop_limit  = path->hop_limit;
414                 ib_ah_attr->grh.traffic_class =
415                         (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
416                 ib_ah_attr->grh.flow_label =
417                         be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
418                 memcpy(ib_ah_attr->grh.dgid.raw,
419                         path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
420         }
421 }
422
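/*
 * Query QP: a QP in RESET is answered from software state; otherwise
 * the QP context is fetched from the HCA with QUERY_QP and translated
 * back into IB attributes.
 */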
423 int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
424                    struct ib_qp_init_attr *qp_init_attr)
425 {
426         struct mthca_dev *dev = to_mdev(ibqp->device);
427         struct mthca_qp *qp = to_mqp(ibqp);
428         int err = 0;
429         struct mthca_mailbox *mailbox = NULL;
430         struct mthca_qp_param *qp_param;
431         struct mthca_qp_context *context;
432         int mthca_state;
433
434         mutex_lock(&qp->mutex);
435
436         if (qp->state == IB_QPS_RESET) {
437                 qp_attr->qp_state = IB_QPS_RESET;
438                 goto done;
439         }
440
441         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
442         if (IS_ERR(mailbox)) {
443                 err = PTR_ERR(mailbox);
444                 goto out;
445         }
446
447         err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
448         if (err) {
449                 mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
450                 goto out_mailbox;
451         }
452
453         qp_param    = mailbox->buf;
454         context     = &qp_param->context;
455         mthca_state = be32_to_cpu(context->flags) >> 28;
456
457         qp->state                    = to_ib_qp_state(mthca_state);
458         qp_attr->qp_state            = qp->state;
459         qp_attr->path_mtu            = context->mtu_msgmax >> 5;
460         qp_attr->path_mig_state      =
461                 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
462         qp_attr->qkey                = be32_to_cpu(context->qkey);
463         qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
464         qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
465         qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
466         qp_attr->qp_access_flags     =
467                 to_ib_qp_access_flags(be32_to_cpu(context->params2));
468
469         if (qp->transport == RC || qp->transport == UC) {
470                 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
471                 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
472                 qp_attr->alt_pkey_index =
473                         be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
474                 qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
475         }
476
477         qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
478         qp_attr->port_num   =
479                 (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;
480
481         /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
482         qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
483
484         qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
485
486         qp_attr->max_dest_rd_atomic =
487                 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
488         qp_attr->min_rnr_timer      =
489                 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
490         qp_attr->timeout            = context->pri_path.ackto >> 3;
491         qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
492         qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
493         qp_attr->alt_timeout        = context->alt_path.ackto >> 3;
494
495 done:
496         qp_attr->cur_qp_state        = qp_attr->qp_state;
497         qp_attr->cap.max_send_wr     = qp->sq.max;
498         qp_attr->cap.max_recv_wr     = qp->rq.max;
499         qp_attr->cap.max_send_sge    = qp->sq.max_gs;
500         qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
501         qp_attr->cap.max_inline_data = qp->max_inline_data;
502
503         qp_init_attr->cap            = qp_attr->cap;
504
505 out_mailbox:
506         mthca_free_mailbox(dev, mailbox);
507
508 out:
509         mutex_unlock(&qp->mutex);
510         return err;
511 }
512
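/*
 * Fill in a hardware address path from an ib_ah_attr.  Returns -1 if
 * the GRH's SGID index is out of range for this device.
 */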
513 static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
514                           struct mthca_qp_path *path, u8 port)
515 {
516         path->g_mylmc     = ah->src_path_bits & 0x7f;
517         path->rlid        = cpu_to_be16(ah->dlid);
518         path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
519
520         if (ah->ah_flags & IB_AH_GRH) {
521                 if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
522                         mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
523                                   ah->grh.sgid_index, dev->limits.gid_table_len-1);
524                         return -1;
525                 }
526
527                 path->g_mylmc   |= 1 << 7;
528                 path->mgid_index = ah->grh.sgid_index;
529                 path->hop_limit  = ah->grh.hop_limit;
530                 path->sl_tclass_flowlabel =
531                         cpu_to_be32((ah->sl << 28)                |
532                                     (ah->grh.traffic_class << 20) |
533                                     (ah->grh.flow_label));
534                 memcpy(path->rgid, ah->grh.dgid.raw, 16);
535         } else
536                 path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
537
538         return 0;
539 }
540
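/*
 * Do the real work of a QP transition: build the QP context and
 * optional-parameter mask in a mailbox, post MODIFY_QP to the
 * firmware, and update the driver's shadow state on success.  The
 * caller has already validated the transition and holds qp->mutex.
 */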
541 static int __mthca_modify_qp(struct ib_qp *ibqp,
542                              const struct ib_qp_attr *attr, int attr_mask,
543                              enum ib_qp_state cur_state, enum ib_qp_state new_state)
544 {
545         struct mthca_dev *dev = to_mdev(ibqp->device);
546         struct mthca_qp *qp = to_mqp(ibqp);
547         struct mthca_mailbox *mailbox;
548         struct mthca_qp_param *qp_param;
549         struct mthca_qp_context *qp_context;
550         u32 sqd_event = 0;
551         int err = -EINVAL;
552
553         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
554         if (IS_ERR(mailbox)) {
555                 err = PTR_ERR(mailbox);
556                 goto out;
557         }
558         qp_param = mailbox->buf;
559         qp_context = &qp_param->context;
560         memset(qp_param, 0, sizeof *qp_param);
561
562         qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
563                                              (to_mthca_st(qp->transport) << 16));
564         qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
565         if (!(attr_mask & IB_QP_PATH_MIG_STATE))
566                 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
567         else {
568                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
569                 switch (attr->path_mig_state) {
570                 case IB_MIG_MIGRATED:
571                         qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
572                         break;
573                 case IB_MIG_REARM:
574                         qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
575                         break;
576                 case IB_MIG_ARMED:
577                         qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
578                         break;
579                 }
580         }
581
582         /* leave tavor_sched_queue as 0 */
583
584         if (qp->transport == MLX || qp->transport == UD)
585                 qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
586         else if (attr_mask & IB_QP_PATH_MTU) {
587                 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
588                         mthca_dbg(dev, "path MTU (%u) is invalid\n",
589                                   attr->path_mtu);
590                         goto out_mailbox;
591                 }
592                 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
593         }
594
595         if (mthca_is_memfree(dev)) {
596                 if (qp->rq.max)
597                         qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
598                 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
599
600                 if (qp->sq.max)
601                         qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
602                 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
603         }
604
605         /* leave arbel_sched_queue as 0 */
606
607         if (qp->ibqp.uobject)
608                 qp_context->usr_page =
609                         cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
610         else
611                 qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
612         qp_context->local_qpn  = cpu_to_be32(qp->qpn);
613         if (attr_mask & IB_QP_DEST_QPN) {
614                 qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
615         }
616
617         if (qp->transport == MLX)
618                 qp_context->pri_path.port_pkey |=
619                         cpu_to_be32(qp->port << 24);
620         else {
621                 if (attr_mask & IB_QP_PORT) {
622                         qp_context->pri_path.port_pkey |=
623                                 cpu_to_be32(attr->port_num << 24);
624                         qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
625                 }
626         }
627
628         if (attr_mask & IB_QP_PKEY_INDEX) {
629                 qp_context->pri_path.port_pkey |=
630                         cpu_to_be32(attr->pkey_index);
631                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
632         }
633
634         if (attr_mask & IB_QP_RNR_RETRY) {
635                 qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
636                         attr->rnr_retry << 5;
637                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
638                                                         MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
639         }
640
641         if (attr_mask & IB_QP_AV) {
642                 if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
643                                    attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
644                         goto out_mailbox;
645
646                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
647         }
648
649         if (ibqp->qp_type == IB_QPT_RC &&
650             cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
651                 u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;
652
653                 if (mthca_is_memfree(dev))
654                         qp_context->rlkey_arbel_sched_queue |= sched_queue;
655                 else
656                         qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);
657
658                 qp_param->opt_param_mask |=
659                         cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
660         }
661
662         if (attr_mask & IB_QP_TIMEOUT) {
663                 qp_context->pri_path.ackto = attr->timeout << 3;
664                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
665         }
666
667         if (attr_mask & IB_QP_ALT_PATH) {
668                 if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
669                         mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
670                                   attr->alt_pkey_index, dev->limits.pkey_table_len-1);
671                         goto out_mailbox;
672                 }
673
674                 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
675                         mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
676                                 attr->alt_port_num);
677                         goto out_mailbox;
678                 }
679
680                 if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
681                                    attr->alt_ah_attr.port_num))
682                         goto out_mailbox;
683
684                 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
685                                                               attr->alt_port_num << 24);
686                 qp_context->alt_path.ackto = attr->alt_timeout << 3;
687                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
688         }
689
690         /* leave rdd as 0 */
691         qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
692         /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
693         qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
694         qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
695                                              (MTHCA_FLIGHT_LIMIT << 24) |
696                                              MTHCA_QP_BIT_SWE);
697         if (qp->sq_policy == IB_SIGNAL_ALL_WR)
698                 qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
699         if (attr_mask & IB_QP_RETRY_CNT) {
700                 qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
701                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
702         }
703
704         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
705                 if (attr->max_rd_atomic) {
706                         qp_context->params1 |=
707                                 cpu_to_be32(MTHCA_QP_BIT_SRE |
708                                             MTHCA_QP_BIT_SAE);
709                         qp_context->params1 |=
710                                 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
711                 }
712                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
713         }
714
715         if (attr_mask & IB_QP_SQ_PSN)
716                 qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
717         qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
718
719         if (mthca_is_memfree(dev)) {
720                 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
721                 qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
722         }
723
724         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
725                 if (attr->max_dest_rd_atomic)
726                         qp_context->params2 |=
727                                 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
728
729                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
730         }
731
732         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
733                 qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
734                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
735                                                         MTHCA_QP_OPTPAR_RRE |
736                                                         MTHCA_QP_OPTPAR_RAE);
737         }
738
739         qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
740
741         if (ibqp->srq)
742                 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
743
744         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
745                 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
746                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
747         }
748         if (attr_mask & IB_QP_RQ_PSN)
749                 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
750
751         qp_context->ra_buff_indx =
752                 cpu_to_be32(dev->qp_table.rdb_base +
753                             ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
754                              dev->qp_table.rdb_shift));
755
756         qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
757
758         if (mthca_is_memfree(dev))
759                 qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);
760
761         if (attr_mask & IB_QP_QKEY) {
762                 qp_context->qkey = cpu_to_be32(attr->qkey);
763                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
764         }
765
766         if (ibqp->srq)
767                 qp_context->srqn = cpu_to_be32(1 << 24 |
768                                                to_msrq(ibqp->srq)->srqn);
769
770         if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
771             attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY               &&
772             attr->en_sqd_async_notify)
773                 sqd_event = 1 << 31;
774
775         err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
776                               mailbox, sqd_event);
777         if (err) {
778                 mthca_warn(dev, "modify QP %d->%d returned %d.\n",
779                            cur_state, new_state, err);
780                 goto out_mailbox;
781         }
782
783         qp->state = new_state;
784         if (attr_mask & IB_QP_ACCESS_FLAGS)
785                 qp->atomic_rd_en = attr->qp_access_flags;
786         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
787                 qp->resp_depth = attr->max_dest_rd_atomic;
788         if (attr_mask & IB_QP_PORT)
789                 qp->port = attr->port_num;
790         if (attr_mask & IB_QP_ALT_PATH)
791                 qp->alt_port = attr->alt_port_num;
792
793         if (is_sqp(dev, qp))
794                 store_attrs(to_msqp(qp), attr, attr_mask);
795
796         /*
797          * If we moved QP0 to RTR, bring the IB link up; if we moved
798          * QP0 to RESET or ERROR, bring the link back down.
799          */
800         if (is_qp0(dev, qp)) {
801                 if (cur_state != IB_QPS_RTR &&
802                     new_state == IB_QPS_RTR)
803                         init_port(dev, qp->port);
804
805                 if (cur_state != IB_QPS_RESET &&
806                     cur_state != IB_QPS_ERR &&
807                     (new_state == IB_QPS_RESET ||
808                      new_state == IB_QPS_ERR))
809                         mthca_CLOSE_IB(dev, qp->port);
810         }
811
812         /*
813          * If we moved a kernel QP to RESET, clean up all old CQ
814          * entries and reinitialize the QP.
815          */
816         if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
817                 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
818                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
819                 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
820                         mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
821
822                 mthca_wq_reset(&qp->sq);
823                 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
824
825                 mthca_wq_reset(&qp->rq);
826                 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
827
828                 if (mthca_is_memfree(dev)) {
829                         *qp->sq.db = 0;
830                         *qp->rq.db = 0;
831                 }
832         }
833
834 out_mailbox:
835         mthca_free_mailbox(dev, mailbox);
836 out:
837         return err;
838 }
839
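/*
 * Verb entry point for modify QP: validate the requested transition
 * and attribute values against device limits, then hand off to
 * __mthca_modify_qp() for the actual firmware command.
 *
 * Illustrative consumer-side sketch (not part of this file) of a
 * typical transition that ends up here via the core ib_modify_qp()
 * verb:
 *
 *     struct ib_qp_attr attr = {
 *             .qp_state        = IB_QPS_INIT,
 *             .pkey_index      = 0,
 *             .port_num        = 1,
 *             .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *     };
 *     err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                                   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */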
840 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
841                     struct ib_udata *udata)
842 {
843         struct mthca_dev *dev = to_mdev(ibqp->device);
844         struct mthca_qp *qp = to_mqp(ibqp);
845         enum ib_qp_state cur_state, new_state;
846         int err = -EINVAL;
847
848         mutex_lock(&qp->mutex);
849         if (attr_mask & IB_QP_CUR_STATE) {
850                 cur_state = attr->cur_qp_state;
851         } else {
852                 spin_lock_irq(&qp->sq.lock);
853                 spin_lock(&qp->rq.lock);
854                 cur_state = qp->state;
855                 spin_unlock(&qp->rq.lock);
856                 spin_unlock_irq(&qp->sq.lock);
857         }
858
859         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
860
861         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
862                 mthca_dbg(dev, "Bad QP transition (transport %d) "
863                           "%d->%d with attr 0x%08x\n",
864                           qp->transport, cur_state, new_state,
865                           attr_mask);
866                 goto out;
867         }
868
869         if ((attr_mask & IB_QP_PKEY_INDEX) &&
870              attr->pkey_index >= dev->limits.pkey_table_len) {
871                 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
872                           attr->pkey_index, dev->limits.pkey_table_len-1);
873                 goto out;
874         }
875
876         if ((attr_mask & IB_QP_PORT) &&
877             (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
878                 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
879                 goto out;
880         }
881
882         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
883             attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
884                 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
885                           attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
886                 goto out;
887         }
888
889         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
890             attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
891                 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
892                           attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
893                 goto out;
894         }
895
896         if (cur_state == new_state && cur_state == IB_QPS_RESET) {
897                 err = 0;
898                 goto out;
899         }
900
901         err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
902
903 out:
904         mutex_unlock(&qp->mutex);
905         return err;
906 }
907
908 static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
909 {
910         /*
911          * Calculate the maximum size of WQE s/g segments, excluding
912          * the next segment and other non-data segments.
913          */
914         int max_data_size = desc_sz - sizeof (struct mthca_next_seg);
915
916         switch (qp->transport) {
917         case MLX:
918                 max_data_size -= 2 * sizeof (struct mthca_data_seg);
919                 break;
920
921         case UD:
922                 if (mthca_is_memfree(dev))
923                         max_data_size -= sizeof (struct mthca_arbel_ud_seg);
924                 else
925                         max_data_size -= sizeof (struct mthca_tavor_ud_seg);
926                 break;
927
928         default:
929                 max_data_size -= sizeof (struct mthca_raddr_seg);
930                 break;
931         }
932
933         return max_data_size;
934 }
935
936 static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
937 {
938         /* We don't support inline data for kernel QPs (yet). */
939         return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
940 }
941
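/*
 * Recompute the QP's gather/scatter and inline-data limits once the
 * actual WQE sizes (wqe_shift) are known.
 */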
942 static void mthca_adjust_qp_caps(struct mthca_dev *dev,
943                                  struct mthca_pd *pd,
944                                  struct mthca_qp *qp)
945 {
946         int max_data_size = mthca_max_data_size(dev, qp,
947                                                 min(dev->limits.max_desc_sz,
948                                                     1 << qp->sq.wqe_shift));
949
950         qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
951
952         qp->sq.max_gs = min_t(int, dev->limits.max_sg,
953                               max_data_size / sizeof (struct mthca_data_seg));
954         qp->rq.max_gs = min_t(int, dev->limits.max_sg,
955                                (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
956                                 sizeof (struct mthca_next_seg)) /
957                                sizeof (struct mthca_data_seg));
958 }
959
960 /*
961  * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
962  * rq.max_gs and sq.max_gs must all be assigned.
963  * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
964  * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
965  * queue)
966  */
967 static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
968                                struct mthca_pd *pd,
969                                struct mthca_qp *qp)
970 {
971         int size;
972         int err = -ENOMEM;
973
974         size = sizeof (struct mthca_next_seg) +
975                 qp->rq.max_gs * sizeof (struct mthca_data_seg);
976
977         if (size > dev->limits.max_desc_sz)
978                 return -EINVAL;
979
980         for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
981              qp->rq.wqe_shift++)
982                 ; /* nothing */
983
984         size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
985         switch (qp->transport) {
986         case MLX:
987                 size += 2 * sizeof (struct mthca_data_seg);
988                 break;
989
990         case UD:
991                 size += mthca_is_memfree(dev) ?
992                         sizeof (struct mthca_arbel_ud_seg) :
993                         sizeof (struct mthca_tavor_ud_seg);
994                 break;
995
996         case UC:
997                 size += sizeof (struct mthca_raddr_seg);
998                 break;
999
1000         case RC:
1001                 size += sizeof (struct mthca_raddr_seg);
1002                 /*
1003                  * An atomic op will require an atomic segment, a
1004                  * remote address segment and one scatter entry.
1005                  */
1006                 size = max_t(int, size,
1007                              sizeof (struct mthca_atomic_seg) +
1008                              sizeof (struct mthca_raddr_seg) +
1009                              sizeof (struct mthca_data_seg));
1010                 break;
1011
1012         default:
1013                 break;
1014         }
1015
1016         /* Make sure that we have enough space for a bind request */
1017         size = max_t(int, size, sizeof (struct mthca_bind_seg));
1018
1019         size += sizeof (struct mthca_next_seg);
1020
1021         if (size > dev->limits.max_desc_sz)
1022                 return -EINVAL;
1023
1024         for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
1025              qp->sq.wqe_shift++)
1026                 ; /* nothing */
1027
1028         qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
1029                                     1 << qp->sq.wqe_shift);
1030
1031         /*
1032          * If this is a userspace QP, we don't actually have to
1033          * allocate anything.  All we need is to calculate the WQE
1034          * sizes and the send_wqe_offset, so we're done now.
1035          */
1036         if (pd->ibpd.uobject)
1037                 return 0;
1038
1039         size = PAGE_ALIGN(qp->send_wqe_offset +
1040                           (qp->sq.max << qp->sq.wqe_shift));
1041
1042         qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
1043                            GFP_KERNEL);
1044         if (!qp->wrid)
1045                 goto err_out;
1046
1047         err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
1048                               &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
1049         if (err)
1050                 goto err_out;
1051
1052         return 0;
1053
1054 err_out:
1055         kfree(qp->wrid);
1056         return err;
1057 }
1058
1059 static void mthca_free_wqe_buf(struct mthca_dev *dev,
1060                                struct mthca_qp *qp)
1061 {
1062         mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
1063                                        (qp->sq.max << qp->sq.wqe_shift)),
1064                        &qp->queue, qp->is_direct, &qp->mr);
1065         kfree(qp->wrid);
1066 }
1067
1068 static int mthca_map_memfree(struct mthca_dev *dev,
1069                              struct mthca_qp *qp)
1070 {
1071         int ret;
1072
1073         if (mthca_is_memfree(dev)) {
1074                 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
1075                 if (ret)
1076                         return ret;
1077
1078                 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
1079                 if (ret)
1080                         goto err_qpc;
1081
1082                 ret = mthca_table_get(dev, dev->qp_table.rdb_table,
1083                                       qp->qpn << dev->qp_table.rdb_shift);
1084                 if (ret)
1085                         goto err_eqpc;
1086
1087         }
1088
1089         return 0;
1090
1091 err_eqpc:
1092         mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1093
1094 err_qpc:
1095         mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1096
1097         return ret;
1098 }
1099
1100 static void mthca_unmap_memfree(struct mthca_dev *dev,
1101                                 struct mthca_qp *qp)
1102 {
1103         mthca_table_put(dev, dev->qp_table.rdb_table,
1104                         qp->qpn << dev->qp_table.rdb_shift);
1105         mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1106         mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1107 }
1108
1109 static int mthca_alloc_memfree(struct mthca_dev *dev,
1110                                struct mthca_qp *qp)
1111 {
1112         if (mthca_is_memfree(dev)) {
1113                 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
1114                                                  qp->qpn, &qp->rq.db);
1115                 if (qp->rq.db_index < 0)
1116                         return -ENOMEM;
1117
1118                 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
1119                                                  qp->qpn, &qp->sq.db);
1120                 if (qp->sq.db_index < 0) {
1121                         mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1122                         return -ENOMEM;
1123                 }
1124         }
1125
1126         return 0;
1127 }
1128
1129 static void mthca_free_memfree(struct mthca_dev *dev,
1130                                struct mthca_qp *qp)
1131 {
1132         if (mthca_is_memfree(dev)) {
1133                 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
1134                 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1135         }
1136 }
1137
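/*
 * Initialization common to regular and special QPs: software state,
 * context memory mappings on mem-free HCAs, the WQE buffer and
 * doorbell records, and (for kernel QPs) chaining the WQEs together
 * via their next-segment pointers.
 */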
1138 static int mthca_alloc_qp_common(struct mthca_dev *dev,
1139                                  struct mthca_pd *pd,
1140                                  struct mthca_cq *send_cq,
1141                                  struct mthca_cq *recv_cq,
1142                                  enum ib_sig_type send_policy,
1143                                  struct mthca_qp *qp)
1144 {
1145         int ret;
1146         int i;
1147         struct mthca_next_seg *next;
1148
1149         qp->refcount = 1;
1150         init_waitqueue_head(&qp->wait);
1151         mutex_init(&qp->mutex);
1152         qp->state        = IB_QPS_RESET;
1153         qp->atomic_rd_en = 0;
1154         qp->resp_depth   = 0;
1155         qp->sq_policy    = send_policy;
1156         mthca_wq_reset(&qp->sq);
1157         mthca_wq_reset(&qp->rq);
1158
1159         spin_lock_init(&qp->sq.lock);
1160         spin_lock_init(&qp->rq.lock);
1161
1162         ret = mthca_map_memfree(dev, qp);
1163         if (ret)
1164                 return ret;
1165
1166         ret = mthca_alloc_wqe_buf(dev, pd, qp);
1167         if (ret) {
1168                 mthca_unmap_memfree(dev, qp);
1169                 return ret;
1170         }
1171
1172         mthca_adjust_qp_caps(dev, pd, qp);
1173
1174         /*
1175          * If this is a userspace QP, we're done now.  The doorbells
1176          * will be allocated and buffers will be initialized in
1177          * userspace.
1178          */
1179         if (pd->ibpd.uobject)
1180                 return 0;
1181
1182         ret = mthca_alloc_memfree(dev, qp);
1183         if (ret) {
1184                 mthca_free_wqe_buf(dev, qp);
1185                 mthca_unmap_memfree(dev, qp);
1186                 return ret;
1187         }
1188
1189         if (mthca_is_memfree(dev)) {
1190                 struct mthca_data_seg *scatter;
1191                 int size = (sizeof (struct mthca_next_seg) +
1192                             qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
1193
1194                 for (i = 0; i < qp->rq.max; ++i) {
1195                         next = get_recv_wqe(qp, i);
1196                         next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
1197                                                    qp->rq.wqe_shift);
1198                         next->ee_nds = cpu_to_be32(size);
1199
1200                         for (scatter = (void *) (next + 1);
1201                              (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
1202                              ++scatter)
1203                                 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
1204                 }
1205
1206                 for (i = 0; i < qp->sq.max; ++i) {
1207                         next = get_send_wqe(qp, i);
1208                         next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
1209                                                     qp->sq.wqe_shift) +
1210                                                    qp->send_wqe_offset);
1211                 }
1212         } else {
1213                 for (i = 0; i < qp->rq.max; ++i) {
1214                         next = get_recv_wqe(qp, i);
1215                         next->nda_op = htonl((((i + 1) % qp->rq.max) <<
1216                                               qp->rq.wqe_shift) | 1);
1217                 }
1218
1219         }
1220
1221         qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
1222         qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
1223
1224         return 0;
1225 }
1226
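/*
 * Validate the requested queue sizes against device limits and round
 * them up as needed; mem-free HCAs require power-of-two queue depths.
 */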
1227 static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
1228                              struct mthca_pd *pd, struct mthca_qp *qp)
1229 {
1230         int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
1231
1232         /* Sanity check QP size before proceeding */
1233         if (cap->max_send_wr     > dev->limits.max_wqes ||
1234             cap->max_recv_wr     > dev->limits.max_wqes ||
1235             cap->max_send_sge    > dev->limits.max_sg   ||
1236             cap->max_recv_sge    > dev->limits.max_sg   ||
1237             cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
1238                 return -EINVAL;
1239
1240         /*
1241          * For MLX transport we need 2 extra send gather entries:
1242          * one for the header and one for the checksum at the end
1243          */
1244         if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
1245                 return -EINVAL;
1246
1247         if (mthca_is_memfree(dev)) {
1248                 qp->rq.max = cap->max_recv_wr ?
1249                         roundup_pow_of_two(cap->max_recv_wr) : 0;
1250                 qp->sq.max = cap->max_send_wr ?
1251                         roundup_pow_of_two(cap->max_send_wr) : 0;
1252         } else {
1253                 qp->rq.max = cap->max_recv_wr;
1254                 qp->sq.max = cap->max_send_wr;
1255         }
1256
1257         qp->rq.max_gs = cap->max_recv_sge;
1258         qp->sq.max_gs = max_t(int, cap->max_send_sge,
1259                               ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
1260                                     MTHCA_INLINE_CHUNK_SIZE) /
1261                               sizeof (struct mthca_data_seg));
1262
1263         return 0;
1264 }
1265
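/*
 * Allocate a regular (RC/UC/UD) QP: pick a QPN from the allocator, do
 * the common initialization and publish the QP in the lookup array.
 */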
1266 int mthca_alloc_qp(struct mthca_dev *dev,
1267                    struct mthca_pd *pd,
1268                    struct mthca_cq *send_cq,
1269                    struct mthca_cq *recv_cq,
1270                    enum ib_qp_type type,
1271                    enum ib_sig_type send_policy,
1272                    struct ib_qp_cap *cap,
1273                    struct mthca_qp *qp)
1274 {
1275         int err;
1276
1277         switch (type) {
1278         case IB_QPT_RC: qp->transport = RC; break;
1279         case IB_QPT_UC: qp->transport = UC; break;
1280         case IB_QPT_UD: qp->transport = UD; break;
1281         default: return -EINVAL;
1282         }
1283
1284         err = mthca_set_qp_size(dev, cap, pd, qp);
1285         if (err)
1286                 return err;
1287
1288         qp->qpn = mthca_alloc(&dev->qp_table.alloc);
1289         if (qp->qpn == -1)
1290                 return -ENOMEM;
1291
1292         /* initialize port to zero for error-catching. */
1293         qp->port = 0;
1294
1295         err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1296                                     send_policy, qp);
1297         if (err) {
1298                 mthca_free(&dev->qp_table.alloc, qp->qpn);
1299                 return err;
1300         }
1301
1302         spin_lock_irq(&dev->qp_table.lock);
1303         mthca_array_set(&dev->qp_table.qp,
1304                         qp->qpn & (dev->limits.num_qps - 1), qp);
1305         spin_unlock_irq(&dev->qp_table.lock);
1306
1307         return 0;
1308 }
1309
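/*
 * Take both CQ locks in a consistent order (lower CQN first) so that
 * QP teardown can't deadlock against another path locking the same
 * pair of CQs.
 */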
1310 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1311         __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1312 {
1313         if (send_cq == recv_cq) {
1314                 spin_lock_irq(&send_cq->lock);
1315                 __acquire(&recv_cq->lock);
1316         } else if (send_cq->cqn < recv_cq->cqn) {
1317                 spin_lock_irq(&send_cq->lock);
1318                 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1319         } else {
1320                 spin_lock_irq(&recv_cq->lock);
1321                 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1322         }
1323 }
1324
1325 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1326         __releases(&send_cq->lock) __releases(&recv_cq->lock)
1327 {
1328         if (send_cq == recv_cq) {
1329                 __release(&recv_cq->lock);
1330                 spin_unlock_irq(&send_cq->lock);
1331         } else if (send_cq->cqn < recv_cq->cqn) {
1332                 spin_unlock(&recv_cq->lock);
1333                 spin_unlock_irq(&send_cq->lock);
1334         } else {
1335                 spin_unlock(&send_cq->lock);
1336                 spin_unlock_irq(&recv_cq->lock);
1337         }
1338 }
1339
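/*
 * Allocate a special QP (QP0 or QP1).  The QPN is fixed by the QP
 * number and port, and a coherent buffer is set aside for building
 * UD headers in software.
 */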
1340 int mthca_alloc_sqp(struct mthca_dev *dev,
1341                     struct mthca_pd *pd,
1342                     struct mthca_cq *send_cq,
1343                     struct mthca_cq *recv_cq,
1344                     enum ib_sig_type send_policy,
1345                     struct ib_qp_cap *cap,
1346                     int qpn,
1347                     int port,
1348                     struct mthca_sqp *sqp)
1349 {
1350         u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
1351         int err;
1352
1353         sqp->qp.transport = MLX;
1354         err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
1355         if (err)
1356                 return err;
1357
1358         sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
1359         sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
1360                                              &sqp->header_dma, GFP_KERNEL);
1361         if (!sqp->header_buf)
1362                 return -ENOMEM;
1363
1364         spin_lock_irq(&dev->qp_table.lock);
1365         if (mthca_array_get(&dev->qp_table.qp, mqpn))
1366                 err = -EBUSY;
1367         else
1368                 mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
1369         spin_unlock_irq(&dev->qp_table.lock);
1370
1371         if (err)
1372                 goto err_out;
1373
1374         sqp->qp.port      = port;
1375         sqp->qp.qpn       = mqpn;
1376         sqp->qp.transport = MLX;
1377
1378         err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1379                                     send_policy, &sqp->qp);
1380         if (err)
1381                 goto err_out_free;
1382
1383         atomic_inc(&pd->sqp_count);
1384
1385         return 0;
1386
1387  err_out_free:
1388         /*
1389          * Lock CQs here, so that CQ polling code can do QP lookup
1390          * without taking a lock.
1391          */
1392         mthca_lock_cqs(send_cq, recv_cq);
1393
1394         spin_lock(&dev->qp_table.lock);
1395         mthca_array_clear(&dev->qp_table.qp, mqpn);
1396         spin_unlock(&dev->qp_table.lock);
1397
1398         mthca_unlock_cqs(send_cq, recv_cq);
1399
1400  err_out:
1401         dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
1402                           sqp->header_buf, sqp->header_dma);
1403
1404         return err;
1405 }
1406
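/*
 * Read qp->refcount under the QP table lock; used as the wait_event()
 * condition in mthca_free_qp() below.
 */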
1407 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1408 {
1409         int c;
1410
1411         spin_lock_irq(&dev->qp_table.lock);
1412         c = qp->refcount;
1413         spin_unlock_irq(&dev->qp_table.lock);
1414
1415         return c;
1416 }
1417
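/*
 * Tear down a QP: remove it from the QP table (under both CQ locks, so
 * the poll path cannot find it any more), wait for all remaining
 * references to be dropped, move the QP back to reset, and then clean
 * up CQ entries, WQE buffers and mem-free resources for kernel QPs.
 */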
1418 void mthca_free_qp(struct mthca_dev *dev,
1419                    struct mthca_qp *qp)
1420 {
1421         struct mthca_cq *send_cq;
1422         struct mthca_cq *recv_cq;
1423
1424         send_cq = to_mcq(qp->ibqp.send_cq);
1425         recv_cq = to_mcq(qp->ibqp.recv_cq);
1426
1427         /*
1428          * Lock both CQs, so that the CQ polling code, which looks up QPs
1429          * without taking a lock, cannot find this QP while we remove it.
1430          */
1431         mthca_lock_cqs(send_cq, recv_cq);
1432
1433         spin_lock(&dev->qp_table.lock);
1434         mthca_array_clear(&dev->qp_table.qp,
1435                           qp->qpn & (dev->limits.num_qps - 1));
1436         --qp->refcount;
1437         spin_unlock(&dev->qp_table.lock);
1438
1439         mthca_unlock_cqs(send_cq, recv_cq);
1440
1441         wait_event(qp->wait, !get_qp_refcount(dev, qp));
1442
1443         if (qp->state != IB_QPS_RESET)
1444                 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
1445                                 NULL, 0);
1446
1447         /*
1448          * If this is a userspace QP, the buffers, MR, CQs and so on
1449          * will be cleaned up in userspace, so all we have to do is
1450          * unref the mem-free tables and free the QPN in our table.
1451          */
1452         if (!qp->ibqp.uobject) {
1453                 mthca_cq_clean(dev, recv_cq, qp->qpn,
1454                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1455                 if (send_cq != recv_cq)
1456                         mthca_cq_clean(dev, send_cq, qp->qpn, NULL);
1457
1458                 mthca_free_memfree(dev, qp);
1459                 mthca_free_wqe_buf(dev, qp);
1460         }
1461
1462         mthca_unmap_memfree(dev, qp);
1463
1464         if (is_sqp(dev, qp)) {
1465                 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1466                 dma_free_coherent(&dev->pdev->dev,
1467                                   to_msqp(qp)->header_buf_size,
1468                                   to_msqp(qp)->header_buf,
1469                                   to_msqp(qp)->header_dma);
1470         } else
1471                 mthca_free(&dev->qp_table.alloc, qp->qpn);
1472 }
1473
1474 /* Create UD header for an MLX send and build a data segment for it */
1475 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1476                             int ind, struct ib_send_wr *wr,
1477                             struct mthca_mlx_seg *mlx,
1478                             struct mthca_data_seg *data)
1479 {
1480         int header_size;
1481         int err;
1482         u16 pkey;
1483
1484         ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
1485                           mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
1486                           &sqp->ud_header);
1487
1488         err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
1489         if (err)
1490                 return err;
1491         mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1492         mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1493                                   (sqp->ud_header.lrh.destination_lid ==
1494                                    IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1495                                   (sqp->ud_header.lrh.service_level << 8));
1496         mlx->rlid = sqp->ud_header.lrh.destination_lid;
1497         mlx->vcrc = 0;
1498
1499         switch (wr->opcode) {
1500         case IB_WR_SEND:
1501                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1502                 sqp->ud_header.immediate_present = 0;
1503                 break;
1504         case IB_WR_SEND_WITH_IMM:
1505                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1506                 sqp->ud_header.immediate_present = 1;
1507                 sqp->ud_header.immediate_data = wr->ex.imm_data;
1508                 break;
1509         default:
1510                 return -EINVAL;
1511         }
1512
1513         sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
1514         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1515                 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1516         sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1517         if (!sqp->qp.ibqp.qp_num)
1518                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
1519                                    sqp->pkey_index, &pkey);
1520         else
1521                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
1522                                    wr->wr.ud.pkey_index, &pkey);
1523         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1524         sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1525         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1526         sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
1527                                                sqp->qkey : wr->wr.ud.remote_qkey);
1528         sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1529
1530         header_size = ib_ud_header_pack(&sqp->ud_header,
1531                                         sqp->header_buf +
1532                                         ind * MTHCA_UD_HEADER_SIZE);
1533
1534         data->byte_count = cpu_to_be32(header_size);
1535         data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
1536         data->addr       = cpu_to_be64(sqp->header_dma +
1537                                        ind * MTHCA_UD_HEADER_SIZE);
1538
1539         return 0;
1540 }
1541
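/*
 * Check whether posting nreq more WQEs would overflow the work queue.
 * The head/tail difference is first checked locklessly; only if the
 * queue looks full do we take the CQ lock and re-read the tail, since
 * the tail is only advanced by the completion path under cq->lock.
 */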
1542 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
1543                                     struct ib_cq *ib_cq)
1544 {
1545         unsigned cur;
1546         struct mthca_cq *cq;
1547
1548         cur = wq->head - wq->tail;
1549         if (likely(cur + nreq < wq->max))
1550                 return 0;
1551
1552         cq = to_mcq(ib_cq);
1553         spin_lock(&cq->lock);
1554         cur = wq->head - wq->tail;
1555         spin_unlock(&cq->lock);
1556
1557         return cur + nreq >= wq->max;
1558 }
1559
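/*
 * The helpers below fill in individual WQE segments (RDMA remote
 * address, atomic operands, Tavor and Arbel UD address vectors) in the
 * byte order the hardware expects.
 */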
1560 static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
1561                                           u64 remote_addr, u32 rkey)
1562 {
1563         rseg->raddr    = cpu_to_be64(remote_addr);
1564         rseg->rkey     = cpu_to_be32(rkey);
1565         rseg->reserved = 0;
1566 }
1567
1568 static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
1569                                            struct ib_send_wr *wr)
1570 {
1571         if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1572                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1573                 aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
1574         } else {
1575                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1576                 aseg->compare  = 0;
1577         }
1578
1579 }
1580
1581 static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
1582                              struct ib_send_wr *wr)
1583 {
1584         useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
1585         useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
1586         useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
1587         useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
1588
1589 }
1590
1591 static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
1592                              struct ib_send_wr *wr)
1593 {
1594         memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
1595         useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1596         useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1597 }
1598
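/*
 * Post a list of send work requests on a Tavor QP.  Each WQE is built
 * in place, linked to the previous WQE through its nda_op/ee_nds
 * control words, and the whole batch is handed to the HCA with a
 * single write to the send doorbell after the loop.
 */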
1599 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1600                           struct ib_send_wr **bad_wr)
1601 {
1602         struct mthca_dev *dev = to_mdev(ibqp->device);
1603         struct mthca_qp *qp = to_mqp(ibqp);
1604         void *wqe;
1605         void *prev_wqe;
1606         unsigned long flags;
1607         int err = 0;
1608         int nreq;
1609         int i;
1610         int size;
1611         /*
1612          * f0 and size0 are only used if nreq != 0, and they will
1613          * always be initialized the first time through the main loop
1614          * before nreq is incremented.  So nreq cannot become non-zero
1615          * without initializing f0 and size0, and they are in fact
1616          * never used uninitialized.
1617          */
1618         int uninitialized_var(size0);
1619         u32 uninitialized_var(f0);
1620         int ind;
1621         u8 op0 = 0;
1622
1623         spin_lock_irqsave(&qp->sq.lock, flags);
1624
1625         /* XXX check that state is OK to post send */
1626
1627         ind = qp->sq.next_ind;
1628
1629         for (nreq = 0; wr; ++nreq, wr = wr->next) {
1630                 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1631                         mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1632                                         " %d max, %d nreq)\n", qp->qpn,
1633                                         qp->sq.head, qp->sq.tail,
1634                                         qp->sq.max, nreq);
1635                         err = -ENOMEM;
1636                         *bad_wr = wr;
1637                         goto out;
1638                 }
1639
1640                 wqe = get_send_wqe(qp, ind);
1641                 prev_wqe = qp->sq.last;
1642                 qp->sq.last = wqe;
1643
1644                 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1645                 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
1646                 ((struct mthca_next_seg *) wqe)->flags =
1647                         ((wr->send_flags & IB_SEND_SIGNALED) ?
1648                          cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1649                         ((wr->send_flags & IB_SEND_SOLICITED) ?
1650                          cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1651                         cpu_to_be32(1);
1652                 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1653                     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1654                         ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1655
1656                 wqe += sizeof (struct mthca_next_seg);
1657                 size = sizeof (struct mthca_next_seg) / 16;
1658
1659                 switch (qp->transport) {
1660                 case RC:
1661                         switch (wr->opcode) {
1662                         case IB_WR_ATOMIC_CMP_AND_SWP:
1663                         case IB_WR_ATOMIC_FETCH_AND_ADD:
1664                                 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1665                                               wr->wr.atomic.rkey);
1666                                 wqe += sizeof (struct mthca_raddr_seg);
1667
1668                                 set_atomic_seg(wqe, wr);
1669                                 wqe += sizeof (struct mthca_atomic_seg);
1670                                 size += (sizeof (struct mthca_raddr_seg) +
1671                                          sizeof (struct mthca_atomic_seg)) / 16;
1672                                 break;
1673
1674                         case IB_WR_RDMA_WRITE:
1675                         case IB_WR_RDMA_WRITE_WITH_IMM:
1676                         case IB_WR_RDMA_READ:
1677                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
1678                                               wr->wr.rdma.rkey);
1679                                 wqe  += sizeof (struct mthca_raddr_seg);
1680                                 size += sizeof (struct mthca_raddr_seg) / 16;
1681                                 break;
1682
1683                         default:
1684                                 /* No extra segments required for sends */
1685                                 break;
1686                         }
1687
1688                         break;
1689
1690                 case UC:
1691                         switch (wr->opcode) {
1692                         case IB_WR_RDMA_WRITE:
1693                         case IB_WR_RDMA_WRITE_WITH_IMM:
1694                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
1695                                               wr->wr.rdma.rkey);
1696                                 wqe  += sizeof (struct mthca_raddr_seg);
1697                                 size += sizeof (struct mthca_raddr_seg) / 16;
1698                                 break;
1699
1700                         default:
1701                                 /* No extra segments required for sends */
1702                                 break;
1703                         }
1704
1705                         break;
1706
1707                 case UD:
1708                         set_tavor_ud_seg(wqe, wr);
1709                         wqe  += sizeof (struct mthca_tavor_ud_seg);
1710                         size += sizeof (struct mthca_tavor_ud_seg) / 16;
1711                         break;
1712
1713                 case MLX:
1714                         err = build_mlx_header(dev, to_msqp(qp), ind, wr,
1715                                                wqe - sizeof (struct mthca_next_seg),
1716                                                wqe);
1717                         if (err) {
1718                                 *bad_wr = wr;
1719                                 goto out;
1720                         }
1721                         wqe += sizeof (struct mthca_data_seg);
1722                         size += sizeof (struct mthca_data_seg) / 16;
1723                         break;
1724                 }
1725
1726                 if (wr->num_sge > qp->sq.max_gs) {
1727                         mthca_err(dev, "too many gathers\n");
1728                         err = -EINVAL;
1729                         *bad_wr = wr;
1730                         goto out;
1731                 }
1732
1733                 for (i = 0; i < wr->num_sge; ++i) {
1734                         mthca_set_data_seg(wqe, wr->sg_list + i);
1735                         wqe  += sizeof (struct mthca_data_seg);
1736                         size += sizeof (struct mthca_data_seg) / 16;
1737                 }
1738
1739                 /* Add one more inline data segment for ICRC */
1740                 if (qp->transport == MLX) {
1741                         ((struct mthca_data_seg *) wqe)->byte_count =
1742                                 cpu_to_be32((1u << 31) | 4);
1743                         ((u32 *) wqe)[1] = 0;
1744                         wqe += sizeof (struct mthca_data_seg);
1745                         size += sizeof (struct mthca_data_seg) / 16;
1746                 }
1747
1748                 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1749
1750                 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
1751                         mthca_err(dev, "opcode invalid\n");
1752                         err = -EINVAL;
1753                         *bad_wr = wr;
1754                         goto out;
1755                 }
1756
1757                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1758                         cpu_to_be32(((ind << qp->sq.wqe_shift) +
1759                                      qp->send_wqe_offset) |
1760                                     mthca_opcode[wr->opcode]);
1761                 wmb();
1762                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1763                         cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
1764                                     ((wr->send_flags & IB_SEND_FENCE) ?
1765                                     MTHCA_NEXT_FENCE : 0));
1766
1767                 if (!nreq) {
1768                         size0 = size;
1769                         op0   = mthca_opcode[wr->opcode];
1770                         f0    = wr->send_flags & IB_SEND_FENCE ?
1771                                 MTHCA_SEND_DOORBELL_FENCE : 0;
1772                 }
1773
1774                 ++ind;
1775                 if (unlikely(ind >= qp->sq.max))
1776                         ind -= qp->sq.max;
1777         }
1778
1779 out:
1780         if (likely(nreq)) {
1781                 wmb();
1782
1783                 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
1784                                qp->send_wqe_offset) | f0 | op0,
1785                               (qp->qpn << 8) | size0,
1786                               dev->kar + MTHCA_SEND_DOORBELL,
1787                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1788                 /*
1789                  * Make sure doorbells don't leak out of SQ spinlock
1790                  * and reach the HCA out of order:
1791                  */
1792                 mmiowb();
1793         }
1794
1795         qp->sq.next_ind = ind;
1796         qp->sq.head    += nreq;
1797
1798         spin_unlock_irqrestore(&qp->sq.lock, flags);
1799         return err;
1800 }
1801
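/*
 * Post a list of receive work requests on a Tavor QP.  A single
 * receive doorbell can only cover a limited number of WQEs, so the
 * doorbell is rung inside the loop every
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests and once more at the end
 * for any remainder.
 */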
1802 int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1803                              struct ib_recv_wr **bad_wr)
1804 {
1805         struct mthca_dev *dev = to_mdev(ibqp->device);
1806         struct mthca_qp *qp = to_mqp(ibqp);
1807         unsigned long flags;
1808         int err = 0;
1809         int nreq;
1810         int i;
1811         int size;
1812         /*
1813          * size0 is only used if nreq != 0, and it will always be
1814          * initialized the first time through the main loop before
1815          * nreq is incremented.  So nreq cannot become non-zero
1816          * without initializing size0, and it is in fact never used
1817          * uninitialized.
1818          */
1819         int uninitialized_var(size0);
1820         int ind;
1821         void *wqe;
1822         void *prev_wqe;
1823
1824         spin_lock_irqsave(&qp->rq.lock, flags);
1825
1826         /* XXX check that state is OK to post receive */
1827
1828         ind = qp->rq.next_ind;
1829
1830         for (nreq = 0; wr; wr = wr->next) {
1831                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1832                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
1833                                         " %d max, %d nreq)\n", qp->qpn,
1834                                         qp->rq.head, qp->rq.tail,
1835                                         qp->rq.max, nreq);
1836                         err = -ENOMEM;
1837                         *bad_wr = wr;
1838                         goto out;
1839                 }
1840
1841                 wqe = get_recv_wqe(qp, ind);
1842                 prev_wqe = qp->rq.last;
1843                 qp->rq.last = wqe;
1844
1845                 ((struct mthca_next_seg *) wqe)->ee_nds =
1846                         cpu_to_be32(MTHCA_NEXT_DBD);
1847                 ((struct mthca_next_seg *) wqe)->flags = 0;
1848
1849                 wqe += sizeof (struct mthca_next_seg);
1850                 size = sizeof (struct mthca_next_seg) / 16;
1851
1852                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1853                         err = -EINVAL;
1854                         *bad_wr = wr;
1855                         goto out;
1856                 }
1857
1858                 for (i = 0; i < wr->num_sge; ++i) {
1859                         mthca_set_data_seg(wqe, wr->sg_list + i);
1860                         wqe  += sizeof (struct mthca_data_seg);
1861                         size += sizeof (struct mthca_data_seg) / 16;
1862                 }
1863
1864                 qp->wrid[ind] = wr->wr_id;
1865
1866                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1867                         cpu_to_be32(MTHCA_NEXT_DBD | size);
1868
1869                 if (!nreq)
1870                         size0 = size;
1871
1872                 ++ind;
1873                 if (unlikely(ind >= qp->rq.max))
1874                         ind -= qp->rq.max;
1875
1876                 ++nreq;
1877                 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
1878                         nreq = 0;
1879
1880                         wmb();
1881
1882                         mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1883                                       qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
1884                                       MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1885
1886                         qp->rq.next_ind = ind;
1887                         qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1888                 }
1889         }
1890
1891 out:
1892         if (likely(nreq)) {
1893                 wmb();
1894
1895                 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1896                               qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
1897                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1898         }
1899
1900         qp->rq.next_ind = ind;
1901         qp->rq.head    += nreq;
1902
1903         /*
1904          * Make sure doorbells don't leak out of RQ spinlock and reach
1905          * the HCA out of order:
1906          */
1907         mmiowb();
1908
1909         spin_unlock_irqrestore(&qp->rq.lock, flags);
1910         return err;
1911 }
1912
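/*
 * Post a list of send work requests on an Arbel (mem-free) QP.
 * Besides the MMIO send doorbell, mem-free mode keeps a doorbell
 * record in memory (*qp->sq.db) that must be updated first; the
 * doorbell is also rung inside the loop once
 * MTHCA_ARBEL_MAX_WQES_PER_SEND_DB requests have been queued.
 */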
1913 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1914                           struct ib_send_wr **bad_wr)
1915 {
1916         struct mthca_dev *dev = to_mdev(ibqp->device);
1917         struct mthca_qp *qp = to_mqp(ibqp);
1918         u32 dbhi;
1919         void *wqe;
1920         void *prev_wqe;
1921         unsigned long flags;
1922         int err = 0;
1923         int nreq;
1924         int i;
1925         int size;
1926         /*
1927          * f0 and size0 are only used if nreq != 0, and they will
1928          * always be initialized the first time through the main loop
1929          * before nreq is incremented.  So nreq cannot become non-zero
1930          * without initializing f0 and size0, and they are in fact
1931          * never used uninitialized.
1932          */
1933         int uninitialized_var(size0);
1934         u32 uninitialized_var(f0);
1935         int ind;
1936         u8 op0 = 0;
1937
1938         spin_lock_irqsave(&qp->sq.lock, flags);
1939
1940         /* XXX check that state is OK to post send */
1941
1942         ind = qp->sq.head & (qp->sq.max - 1);
1943
1944         for (nreq = 0; wr; ++nreq, wr = wr->next) {
1945                 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
1946                         nreq = 0;
1947
1948                         dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
1949                                 ((qp->sq.head & 0xffff) << 8) | f0 | op0;
1950
1951                         qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
1952
1953                         /*
1954                          * Make sure that descriptors are written before
1955                          * doorbell record.
1956                          */
1957                         wmb();
1958                         *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1959
1960                         /*
1961                          * Make sure doorbell record is written before we
1962                          * write MMIO send doorbell.
1963                          */
1964                         wmb();
1965
1966                         mthca_write64(dbhi, (qp->qpn << 8) | size0,
1967                                       dev->kar + MTHCA_SEND_DOORBELL,
1968                                       MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1969                 }
1970
1971                 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1972                         mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1973                                         " %d max, %d nreq)\n", qp->qpn,
1974                                         qp->sq.head, qp->sq.tail,
1975                                         qp->sq.max, nreq);
1976                         err = -ENOMEM;
1977                         *bad_wr = wr;
1978                         goto out;
1979                 }
1980
1981                 wqe = get_send_wqe(qp, ind);
1982                 prev_wqe = qp->sq.last;
1983                 qp->sq.last = wqe;
1984
1985                 ((struct mthca_next_seg *) wqe)->flags =
1986                         ((wr->send_flags & IB_SEND_SIGNALED) ?
1987                          cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1988                         ((wr->send_flags & IB_SEND_SOLICITED) ?
1989                          cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1990                         ((wr->send_flags & IB_SEND_IP_CSUM) ?
1991                          cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
1992                         cpu_to_be32(1);
1993                 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1994                     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1995                         ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1996
1997                 wqe += sizeof (struct mthca_next_seg);
1998                 size = sizeof (struct mthca_next_seg) / 16;
1999
2000                 switch (qp->transport) {
2001                 case RC:
2002                         switch (wr->opcode) {
2003                         case IB_WR_ATOMIC_CMP_AND_SWP:
2004                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2005                                 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
2006                                               wr->wr.atomic.rkey);
2007                                 wqe += sizeof (struct mthca_raddr_seg);
2008
2009                                 set_atomic_seg(wqe, wr);
2010                                 wqe  += sizeof (struct mthca_atomic_seg);
2011                                 size += (sizeof (struct mthca_raddr_seg) +
2012                                          sizeof (struct mthca_atomic_seg)) / 16;
2013                                 break;
2014
2015                         case IB_WR_RDMA_READ:
2016                         case IB_WR_RDMA_WRITE:
2017                         case IB_WR_RDMA_WRITE_WITH_IMM:
2018                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
2019                                               wr->wr.rdma.rkey);
2020                                 wqe  += sizeof (struct mthca_raddr_seg);
2021                                 size += sizeof (struct mthca_raddr_seg) / 16;
2022                                 break;
2023
2024                         default:
2025                                 /* No extra segments required for sends */
2026                                 break;
2027                         }
2028
2029                         break;
2030
2031                 case UC:
2032                         switch (wr->opcode) {
2033                         case IB_WR_RDMA_WRITE:
2034                         case IB_WR_RDMA_WRITE_WITH_IMM:
2035                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
2036                                               wr->wr.rdma.rkey);
2037                                 wqe  += sizeof (struct mthca_raddr_seg);
2038                                 size += sizeof (struct mthca_raddr_seg) / 16;
2039                                 break;
2040
2041                         default:
2042                                 /* No extra segments required for sends */
2043                                 break;
2044                         }
2045
2046                         break;
2047
2048                 case UD:
2049                         set_arbel_ud_seg(wqe, wr);
2050                         wqe  += sizeof (struct mthca_arbel_ud_seg);
2051                         size += sizeof (struct mthca_arbel_ud_seg) / 16;
2052                         break;
2053
2054                 case MLX:
2055                         err = build_mlx_header(dev, to_msqp(qp), ind, wr,
2056                                                wqe - sizeof (struct mthca_next_seg),
2057                                                wqe);
2058                         if (err) {
2059                                 *bad_wr = wr;
2060                                 goto out;
2061                         }
2062                         wqe += sizeof (struct mthca_data_seg);
2063                         size += sizeof (struct mthca_data_seg) / 16;
2064                         break;
2065                 }
2066
2067                 if (wr->num_sge > qp->sq.max_gs) {
2068                         mthca_err(dev, "too many gathers\n");
2069                         err = -EINVAL;
2070                         *bad_wr = wr;
2071                         goto out;
2072                 }
2073
2074                 for (i = 0; i < wr->num_sge; ++i) {
2075                         mthca_set_data_seg(wqe, wr->sg_list + i);
2076                         wqe  += sizeof (struct mthca_data_seg);
2077                         size += sizeof (struct mthca_data_seg) / 16;
2078                 }
2079
2080                 /* Add one more inline data segment for ICRC */
2081                 if (qp->transport == MLX) {
2082                         ((struct mthca_data_seg *) wqe)->byte_count =
2083                                 cpu_to_be32((1u << 31) | 4);
2084                         ((u32 *) wqe)[1] = 0;
2085                         wqe += sizeof (struct mthca_data_seg);
2086                         size += sizeof (struct mthca_data_seg) / 16;
2087                 }
2088
2089                 qp->wrid[ind + qp->rq.max] = wr->wr_id;
2090
2091                 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
2092                         mthca_err(dev, "opcode invalid\n");
2093                         err = -EINVAL;
2094                         *bad_wr = wr;
2095                         goto out;
2096                 }
2097
2098                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
2099                         cpu_to_be32(((ind << qp->sq.wqe_shift) +
2100                                      qp->send_wqe_offset) |
2101                                     mthca_opcode[wr->opcode]);
2102                 wmb();
2103                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
2104                         cpu_to_be32(MTHCA_NEXT_DBD | size |
2105                                     ((wr->send_flags & IB_SEND_FENCE) ?
2106                                      MTHCA_NEXT_FENCE : 0));
2107
2108                 if (!nreq) {
2109                         size0 = size;
2110                         op0   = mthca_opcode[wr->opcode];
2111                         f0    = wr->send_flags & IB_SEND_FENCE ?
2112                                 MTHCA_SEND_DOORBELL_FENCE : 0;
2113                 }
2114
2115                 ++ind;
2116                 if (unlikely(ind >= qp->sq.max))
2117                         ind -= qp->sq.max;
2118         }
2119
2120 out:
2121         if (likely(nreq)) {
2122                 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;
2123
2124                 qp->sq.head += nreq;
2125
2126                 /*
2127                  * Make sure that descriptors are written before
2128                  * doorbell record.
2129                  */
2130                 wmb();
2131                 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
2132
2133                 /*
2134                  * Make sure doorbell record is written before we
2135                  * write MMIO send doorbell.
2136                  */
2137                 wmb();
2138
2139                 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
2140                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
2141         }
2142
2143         /*
2144          * Make sure doorbells don't leak out of SQ spinlock and reach
2145          * the HCA out of order:
2146          */
2147         mmiowb();
2148
2149         spin_unlock_irqrestore(&qp->sq.lock, flags);
2150         return err;
2151 }
2152
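/*
 * Post a list of receive work requests on an Arbel (mem-free) QP.  No
 * MMIO doorbell is needed here: updating the doorbell record
 * (*qp->rq.db) after the descriptors are written is enough.  When
 * fewer than max_gs scatter entries are used, a data segment with the
 * invalid lkey is added to terminate the scatter list.
 */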
2153 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2154                              struct ib_recv_wr **bad_wr)
2155 {
2156         struct mthca_dev *dev = to_mdev(ibqp->device);
2157         struct mthca_qp *qp = to_mqp(ibqp);
2158         unsigned long flags;
2159         int err = 0;
2160         int nreq;
2161         int ind;
2162         int i;
2163         void *wqe;
2164
2165         spin_lock_irqsave(&qp->rq.lock, flags);
2166
2167         /* XXX check that state is OK to post receive */
2168
2169         ind = qp->rq.head & (qp->rq.max - 1);
2170
2171         for (nreq = 0; wr; ++nreq, wr = wr->next) {
2172                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2173                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
2174                                         " %d max, %d nreq)\n", qp->qpn,
2175                                         qp->rq.head, qp->rq.tail,
2176                                         qp->rq.max, nreq);
2177                         err = -ENOMEM;
2178                         *bad_wr = wr;
2179                         goto out;
2180                 }
2181
2182                 wqe = get_recv_wqe(qp, ind);
2183
2184                 ((struct mthca_next_seg *) wqe)->flags = 0;
2185
2186                 wqe += sizeof (struct mthca_next_seg);
2187
2188                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2189                         err = -EINVAL;
2190                         *bad_wr = wr;
2191                         goto out;
2192                 }
2193
2194                 for (i = 0; i < wr->num_sge; ++i) {
2195                         mthca_set_data_seg(wqe, wr->sg_list + i);
2196                         wqe += sizeof (struct mthca_data_seg);
2197                 }
2198
2199                 if (i < qp->rq.max_gs)
2200                         mthca_set_data_seg_inval(wqe);
2201
2202                 qp->wrid[ind] = wr->wr_id;
2203
2204                 ++ind;
2205                 if (unlikely(ind >= qp->rq.max))
2206                         ind -= qp->rq.max;
2207         }
2208 out:
2209         if (likely(nreq)) {
2210                 qp->rq.head += nreq;
2211
2212                 /*
2213                  * Make sure that descriptors are written before
2214                  * doorbell record.
2215                  */
2216                 wmb();
2217                 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2218         }
2219
2220         spin_unlock_irqrestore(&qp->rq.lock, flags);
2221         return err;
2222 }
2223
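/*
 * Called when a WQE completes in error and has to be flushed: report
 * whether the WQE had its DBD bit set and compute the nda_op/ee_nds
 * value describing the next WQE in the chain (or 0 if there is none),
 * for use by the completion path when cleaning up after an error.
 */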
2224 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2225                         int index, int *dbd, __be32 *new_wqe)
2226 {
2227         struct mthca_next_seg *next;
2228
2229         /*
2230          * For SRQs, all receive WQEs generate a CQE, so we're always
2231          * at the end of the doorbell chain.
2232          */
2233         if (qp->ibqp.srq && !is_send) {
2234                 *new_wqe = 0;
2235                 return;
2236         }
2237
2238         if (is_send)
2239                 next = get_send_wqe(qp, index);
2240         else
2241                 next = get_recv_wqe(qp, index);
2242
2243         *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2244         if (next->ee_nds & cpu_to_be32(0x3f))
2245                 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2246                         (next->ee_nds & cpu_to_be32(0x3f));
2247         else
2248                 *new_wqe = 0;
2249 }
2250
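/*
 * Set up the QP table at driver initialization: create the QPN
 * allocator and QP array, carve the special QP numbers out of the
 * space just above the firmware-reserved QPs, and tell the firmware
 * which QPNs to use as the SMI and GSI QPs on each port.
 */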
2251 int mthca_init_qp_table(struct mthca_dev *dev)
2252 {
2253         int err;
2254         int i;
2255
2256         spin_lock_init(&dev->qp_table.lock);
2257
2258         /*
2259          * We reserve 2 extra QPs per port for the special QPs.  The
2260          * special QP for port 1 has to be even, so round up.
2261          */
2262         dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
2263         err = mthca_alloc_init(&dev->qp_table.alloc,
2264                                dev->limits.num_qps,
2265                                (1 << 24) - 1,
2266                                dev->qp_table.sqp_start +
2267                                MTHCA_MAX_PORTS * 2);
2268         if (err)
2269                 return err;
2270
2271         err = mthca_array_init(&dev->qp_table.qp,
2272                                dev->limits.num_qps);
2273         if (err) {
2274                 mthca_alloc_cleanup(&dev->qp_table.alloc);
2275                 return err;
2276         }
2277
2278         for (i = 0; i < 2; ++i) {
2279                 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
2280                                     dev->qp_table.sqp_start + i * 2);
2281                 if (err) {
2282                         mthca_warn(dev, "CONF_SPECIAL_QP returned "
2283                                    "%d, aborting.\n", err);
2284                         goto err_out;
2285                 }
2286         }
2287         return 0;
2288
2289  err_out:
2290         for (i = 0; i < 2; ++i)
2291                 mthca_CONF_SPECIAL_QP(dev, i, 0);
2292
2293         mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2294         mthca_alloc_cleanup(&dev->qp_table.alloc);
2295
2296         return err;
2297 }
2298
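/*
 * Undo mthca_init_qp_table(): unconfigure the special QPs and free the
 * QP array and QPN allocator.
 */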
2299 void mthca_cleanup_qp_table(struct mthca_dev *dev)
2300 {
2301         int i;
2302
2303         for (i = 0; i < 2; ++i)
2304                 mthca_CONF_SPECIAL_QP(dev, i, 0);
2305
2306         mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2307         mthca_alloc_cleanup(&dev->qp_table.alloc);
2308 }