/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"
/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;		/* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;

bail:
	return ret;
}
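/*
 * Illustrative sketch, not part of the driver: a kernel ULP normally
 * reaches qib_get_dma_mr() through the core verbs wrapper ib_get_dma_mr()
 * and then uses the returned lkey for DMA-mapped buffers, e.g.:
 *
 *	struct ib_mr *dma_mr;
 *
 *	dma_mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(dma_mr))
 *		return PTR_ERR(dma_mr);
 *	sge.lkey = dma_mr->lkey;
 */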
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;
	mr->mr.page_shift = 0;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;

	atomic_set(&mr->mr.refcount, 0);
	goto done;

bail:
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}
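/*
 * Sizing note (illustrative): each first-level map page holds QIB_SEGSZ
 * segments, so alloc_mr() needs m = DIV_ROUND_UP(count, QIB_SEGSZ) map
 * pages.  For example, if QIB_SEGSZ were 64 and count were 200, this
 * would give m = (200 + 63) / 64 = 4, with the last map page only
 * partially used.  The real QIB_SEGSZ value is defined in qib_verbs.h.
 */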
/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}
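/*
 * Illustrative sketch, not part of the driver: a kernel caller reaches
 * qib_reg_phys_mr() through ib_reg_phys_mr() with an array of
 * struct ib_phys_buf entries; the single-buffer example below uses
 * made-up names (dma_handle, pd) for the bus address and protection
 * domain.
 *
 *	struct ib_phys_buf pbuf = {
 *		.addr = dma_handle,
 *		.size = PAGE_SIZE,
 *	};
 *	u64 iova = dma_handle;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, &pbuf, 1,
 *			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    &iova);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */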
/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
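/*
 * Illustrative sketch, not part of the driver: this entry point runs when
 * a userspace process registers memory through libibverbs; the uverbs
 * layer pins the pages via ib_umem_get() above.  The userspace side looks
 * roughly like this (buf and len are the caller's buffer):
 *
 *	struct ibv_mr *mr;
 *
 *	mr = ibv_reg_mr(pd, buf, len,
 *			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
 *	if (!mr)
 *		return errno;
 */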
/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int ret;
	int i;

	ret = qib_free_lkey(dev, &mr->mr);
	if (ret)
		return ret;

	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}
/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof *pl, GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}
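/*
 * Illustrative sketch, not part of the driver: a ULP pairs the fast
 * register MR and page list allocated above with an IB_WR_FAST_REG_MR
 * send work request.  Field names below assume the ib_send_wr fast_reg
 * union of this kernel generation; iova, npages, page_list, fast_reg_mr
 * and qp are made-up names for the example.
 *
 *	struct ib_send_wr wr, *bad_wr;
 *
 *	memset(&wr, 0, sizeof(wr));
 *	wr.opcode = IB_WR_FAST_REG_MR;
 *	wr.wr.fast_reg.iova_start = iova;
 *	wr.wr.fast_reg.page_list = page_list;
 *	wr.wr.fast_reg.page_list_len = npages;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.length = npages * PAGE_SIZE;
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
 *	wr.wr.fast_reg.rkey = fast_reg_mr->rkey;
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */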
/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
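/*
 * Illustrative sketch, not part of the driver: FMRs are consumed through
 * the core ib_alloc_fmr()/ib_map_phys_fmr()/ib_unmap_fmr() calls (or the
 * ib_fmr_pool helpers built on top of them).  The attribute values and
 * page addresses below are made up for the example.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages = 64,
 *		.max_maps = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	u64 pages[2] = { dma_addr0, dma_addr1 };
 *	LIST_HEAD(fmr_list);
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (!IS_ERR(fmr) && !ib_map_phys_fmr(fmr, pages, 2, iova)) {
 *		... post work requests using fmr->lkey / fmr->rkey ...
 *		list_add(&fmr->list, &fmr_list);
 *		ib_unmap_fmr(&fmr_list);
 *	}
 */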
/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret;
	int i;

	ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (ret)
		return ret;

	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}