/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

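		/*
		 * The receive queue is a ring indexed by head (the producer
		 * index, advanced here) and tail (the consumer index).  One
		 * slot is always left unused so that head == tail means
		 * "empty" and advancing head onto tail means "full".
		 */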
		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: not used by the InfiniPath verbs driver
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibpd->device);
	struct ipath_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}
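
	/*
	 * The ring gets one entry more than max_wr so a full queue can be
	 * told apart from an empty one, and each entry is an ipath_rwqe
	 * header followed by room for max_sge scatter/gather entries.
	 */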
	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct ipath_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

		srq->ip =
		    ipath_create_mmap_info(dev, s,
					   ibpd->uobject->context,
					   srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;
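
	/*
	 * Count the new SRQ against the per-device limit.  This is checked
	 * last so that a failure here can still unwind through the bail
	 * labels below and free everything allocated above.
	 */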
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwq *owq;
		struct ipath_rwq *wq;
		struct ipath_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct ipath_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/*
		 * Return the address of the RWQ as the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = (__u64) wq;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret) {
				vfree(wq);
				goto bail;
			}
			udata->outbuf = (void __user *) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret) {
				vfree(wq);
				goto bail;
			}
		}
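
		/*
		 * Resize under the SRQ lock: any WQEs still outstanding in
		 * the old ring are copied to the front of the new ring, the
		 * new head becomes the number of entries carried over, and
		 * the old ring is freed afterwards.
		 */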
		spin_lock_irq(&srq->rq.lock);
		/*
		 * Validate the head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		if (head >= srq->rq.size)
			head = 0;
		tail = owq->tail;
		if (tail >= srq->rq.size)
			tail = 0;
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			spin_unlock_irq(&srq->rq.lock);
			vfree(wq);
			ret = -EINVAL;
			goto bail;
		}

		p = wq->wq;
		while (tail != head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct ipath_rwq) + size * sz;

			ipath_update_mmap_info(dev, ip, s, wq);
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}

bail:
	return ret;
}

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;

	return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
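	/*
	 * If userspace mmap()ed the RWQ, the queue memory is owned by the
	 * mmap info's kref and is released via ipath_release_mmap_info()
	 * when the last reference is dropped; otherwise free it directly.
	 */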
	if (srq->ip)
		kref_put(&srq->ip->ref, ipath_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}