pandora-kernel.git: drivers/infiniband/hw/mlx4/cq.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/slab.h>	/* kmalloc()/kfree() */

#include "mlx4_ib.h"
#include "user.h"

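/* Dispatch a hardware completion notification to the consumer's CQ handler. */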
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

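/*
 * A CQE is ready to be polled when its ownership bit matches the wrap
 * parity of the index: the bit flips on every pass through the
 * power-of-two sized ring, so a mismatch means the entry is stale.
 */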
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

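/* Adjust the CQ's completion event moderation parameters (count/period). */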
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

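/*
 * Allocate a kernel-space CQ buffer and set up the MTT entries that let
 * the HCA address it.
 */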
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);

	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

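/*
 * Pin a user-space CQ buffer with ib_umem_get() and write its page list
 * into an MTT so the HCA can DMA to it.
 */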
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);
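
	/*
	 * The hardware requires the CQ size to be a power of two; round up
	 * and report one entry less so that cq->ibcq.cqe can double as the
	 * index mask used by get_sw_cqe().
	 */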
	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

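/* Allocate a kernel-space buffer (and MTT) to hold the CQEs of a resized CQ. */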
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

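/* Pin the user-space buffer supplied for a CQ resize and build its MTT. */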
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				   int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

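/* Count the completed CQEs that the consumer has not polled yet. */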
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}

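/*
 * Copy the CQEs that have not been polled yet from the old buffer into the
 * resize buffer, stopping at the special RESIZE CQE and fixing up each
 * entry's ownership bit for the new ring size.
 */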
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

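/* Translate an error CQE's syndrome into the corresponding IB WC status. */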
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		printk(KERN_DEBUG "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

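/*
 * The hardware validates checksums on receive: accept only non-fragmented
 * IPv4 packets without IP options, carrying TCP or UDP, whose IP checksum
 * was OK and whose computed checksum is 0xffff.
 */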
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
				      MLX4_CQE_STATUS_IPV4F     |
				      MLX4_CQE_STATUS_IPV4OPT   |
				      MLX4_CQE_STATUS_IPV6      |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
			    MLX4_CQE_STATUS_IPOK))              &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP       |
				      MLX4_CQE_STATUS_TCP))     &&
		checksum == cpu_to_be16(0xffff);
}

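/*
 * Process a single CQE: advance the consumer index, look up the QP it
 * belongs to (caching it in *cur_qp) and translate the hardware entry
 * into an ib_wc.  Returns -EAGAIN when no software-owned CQE is available.
 */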
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq        = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags    = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode      = IB_WC_RECV;
			wc->wc_flags    = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode      = IB_WC_RECV;
			wc->wc_flags    = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid           = be16_to_cpu(cqe->rlid);
		wc->sl             = be16_to_cpu(cqe->sl_vid) >> 12;
		g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp         = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
	}

	return 0;
}

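/*
 * Poll up to num_entries completions under the CQ lock and update the
 * consumer-index doorbell record when at least one CQE was consumed.
 */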
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

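/*
 * Arm the CQ so the next completion (or next solicited completion,
 * depending on flags) generates a completion event.
 */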
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

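/*
 * Remove all CQEs belonging to the given QP (called while that QP is being
 * reset/destroyed); receive completions for an SRQ have their WQEs returned
 * to the SRQ.  The caller must hold the CQ lock.
 */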
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}