/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

static struct ehca_mr *ehca_mr_new(void)
{
        struct ehca_mr *me;

        me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
        if (me) {
                memset(me, 0, sizeof(struct ehca_mr));
                spin_lock_init(&me->mrlock);
        } else
                ehca_gen_err("alloc failed");

        return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
        kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
        struct ehca_mw *me;

        me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
        if (me) {
                memset(me, 0, sizeof(struct ehca_mw));
                spin_lock_init(&me->mwlock);
        } else
                ehca_gen_err("alloc failed");

        return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
        kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/

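/*
 * Verbs entry point for a DMA MR: hands out a newly registered max-MR,
 * which spans all of kernel memory starting at KERNELBASE.  Fails if the
 * driver's internal max-MR (shca->maxmr) has not been set up.
 */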
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *ib_mr;
        int ret;
        struct ehca_mr *e_maxmr;
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);

        if (shca->maxmr) {
                e_maxmr = ehca_mr_new();
                if (!e_maxmr) {
                        ehca_err(&shca->ib_device, "out of memory");
                        ib_mr = ERR_PTR(-ENOMEM);
                        goto get_dma_mr_exit0;
                }

                ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
                                     mr_access_flags, e_pd,
                                     &e_maxmr->ib.ib_mr.lkey,
                                     &e_maxmr->ib.ib_mr.rkey);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto get_dma_mr_exit0;
                }
                ib_mr = &e_maxmr->ib.ib_mr;
        } else {
                ehca_err(&shca->ib_device, "no internal max-MR exists!");
                ib_mr = ERR_PTR(-EINVAL);
                goto get_dma_mr_exit0;
        }

get_dma_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x",
                         PTR_ERR(ib_mr), pd, mr_access_flags);
        return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/

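/*
 * Verbs entry point to register a physical memory region.  Validates the
 * access flags and buffer list, then registers the region either as a
 * max-MR or as an ordinary MR with a page list derived from the physical
 * buffers.
 */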
struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
                               struct ib_phys_buf *phys_buf_array,
                               int num_phys_buf,
                               int mr_access_flags,
                               u64 *iova_start)
{
        struct ib_mr *ib_mr;
        int ret;
        struct ehca_mr *e_mr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

        u64 size;
        struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
        u32 num_pages_mr;
        u32 num_pages_4k; /* 4k portion "pages" */

        if ((num_phys_buf <= 0) || !phys_buf_array) {
                ehca_err(pd->device, "bad input values: num_phys_buf=%x "
                         "phys_buf_array=%p", num_phys_buf, phys_buf_array);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }

        /* check physical buffer list and calculate size */
        ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
                                            iova_start, &size);
        if (ret) {
                ib_mr = ERR_PTR(ret);
                goto reg_phys_mr_exit0;
        }
        if ((size == 0) ||
            (((u64)iova_start + size) < (u64)iova_start)) {
                ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
                         size, iova_start);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(pd->device, "out of memory");
                ib_mr = ERR_PTR(-ENOMEM);
                goto reg_phys_mr_exit0;
        }

        /* determine number of MR pages */
        num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
                         PAGE_SIZE - 1) / PAGE_SIZE);
        num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
                         EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

        /* register MR on HCA */
        if (ehca_mr_is_maxmr(size, iova_start)) {
                e_mr->flags |= EHCA_MR_FLAG_MAXMR;
                ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
                                     e_pd, &e_mr->ib.ib_mr.lkey,
                                     &e_mr->ib.ib_mr.rkey);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto reg_phys_mr_exit1;
                }
        } else {
                pginfo.type           = EHCA_MR_PGI_PHYS;
                pginfo.num_pages      = num_pages_mr;
                pginfo.num_4k         = num_pages_4k;
                pginfo.num_phys_buf   = num_phys_buf;
                pginfo.phys_buf_array = phys_buf_array;
                pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
                                         EHCA_PAGESIZE);

                ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
                                  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
                                  &e_mr->ib.ib_mr.rkey);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto reg_phys_mr_exit1;
                }
        }

        /* successful registration of all pages */
        return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
        ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
                         "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
                         PTR_ERR(ib_mr), pd, phys_buf_array,
                         num_phys_buf, mr_access_flags, iova_start);
        return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/

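/*
 * Verbs entry point to register a userspace memory region pinned by the
 * IB core (ib_umem).  Only regions with the native PAGE_SIZE are
 * supported; the umem chunk list is handed to ehca_reg_mr() for page
 * registration.
 */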
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
                               struct ib_umem *region,
                               int mr_access_flags,
                               struct ib_udata *udata)
{
        struct ib_mr *ib_mr;
        struct ehca_mr *e_mr;
        struct ehca_shca *shca;
        struct ehca_pd *e_pd;
        struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
        int ret;
        u32 num_pages_mr;
        u32 num_pages_4k; /* 4k portion "pages" */

        if (!pd) {
                /* check pd before it is dereferenced below */
                ehca_gen_err("bad pd=%p", pd);
                return ERR_PTR(-EFAULT);
        }
        shca = container_of(pd->device, struct ehca_shca, ib_device);
        e_pd = container_of(pd, struct ehca_pd, ib_pd);

        if (!region) {
                ehca_err(pd->device, "bad input values: region=%p", region);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }
        if (region->page_size != PAGE_SIZE) {
                ehca_err(pd->device, "page size not supported, "
                         "region->page_size=%x", region->page_size);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }

        if ((region->length == 0) ||
            ((region->virt_base + region->length) < region->virt_base)) {
                ehca_err(pd->device, "bad input values: length=%lx "
                         "virt_base=%lx", region->length, region->virt_base);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(pd->device, "out of memory");
                ib_mr = ERR_PTR(-ENOMEM);
                goto reg_user_mr_exit0;
        }

        /* determine number of MR pages */
        num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
                         PAGE_SIZE - 1) / PAGE_SIZE);
        num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
                         EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

        /* register MR on HCA */
        pginfo.type       = EHCA_MR_PGI_USER;
        pginfo.num_pages  = num_pages_mr;
        pginfo.num_4k     = num_pages_4k;
        pginfo.region     = region;
        pginfo.next_4k    = region->offset / EHCA_PAGESIZE;
        pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
                                               (&region->chunk_list),
                                               list);

        ret = ehca_reg_mr(shca, e_mr, (u64 *)region->virt_base,
                          region->length, mr_access_flags, e_pd, &pginfo,
                          &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
        if (ret) {
                ib_mr = ERR_PTR(ret);
                goto reg_user_mr_exit1;
        }

        /* successful registration of all pages */
        return &e_mr->ib.ib_mr;

reg_user_mr_exit1:
        ehca_mr_delete(e_mr);
reg_user_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
                         " udata=%p",
                         PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
        return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/

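/*
 * Verbs entry point to re-register a physical MR.  Depending on
 * mr_rereg_mask this changes the translation (address/size), the access
 * rights and/or the protection domain; the MR lock is held while
 * ehca_rereg_mr() applies the new values.
 */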
int ehca_rereg_phys_mr(struct ib_mr *mr,
                       int mr_rereg_mask,
                       struct ib_pd *pd,
                       struct ib_phys_buf *phys_buf_array,
                       int num_phys_buf,
                       int mr_access_flags,
                       u64 *iova_start)
{
        int ret;

        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
        u64 new_size;
        u64 *new_start;
        u32 new_acl;
        struct ehca_pd *new_pd;
        u32 tmp_lkey, tmp_rkey;
        unsigned long sl_flags;
        u32 num_pages_mr = 0;
        u32 num_pages_4k = 0; /* 4k portion "pages" */
        struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
        u32 cur_pid = current->tgid;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            (my_pd->ownpid != cur_pid)) {
                ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
                /* TODO not supported, because PHYP rereg hCall needs pages */
                ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
                         "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        if (mr_rereg_mask & IB_MR_REREG_PD) {
                if (!pd) {
                        ehca_err(mr->device, "rereg with bad pd, pd=%p "
                                 "mr_rereg_mask=%x", pd, mr_rereg_mask);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
        }

        if ((mr_rereg_mask &
             ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
            (mr_rereg_mask == 0)) {
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        /* check other parameters */
        if (e_mr == shca->maxmr) {
                /* should be impossible, however reject to be sure */
                ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
                         "shca->maxmr=%p mr->lkey=%x",
                         mr, shca->maxmr, mr->lkey);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }
        if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
                if (e_mr->flags & EHCA_MR_FLAG_FMR) {
                        ehca_err(mr->device, "not supported for FMR, mr=%p "
                                 "flags=%x", mr, e_mr->flags);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
                if (!phys_buf_array || num_phys_buf <= 0) {
                        ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
                                 " phys_buf_array=%p num_phys_buf=%x",
                                 mr_rereg_mask, phys_buf_array, num_phys_buf);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
        }
        if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&     /* change ACL */
            (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
             ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
                         "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        /* set requested values dependent on rereg request */
        spin_lock_irqsave(&e_mr->mrlock, sl_flags);
        new_start = e_mr->start;  /* new == old address */
        new_size  = e_mr->size;   /* new == old length */
        new_acl   = e_mr->acl;    /* new == old access control */
        new_pd    = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/

        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                new_start = iova_start; /* change address */
                /* check physical buffer list and calculate size */
                ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
                                                    num_phys_buf, iova_start,
                                                    &new_size);
                if (ret)
                        goto rereg_phys_mr_exit1;
                if ((new_size == 0) ||
                    (((u64)iova_start + new_size) < (u64)iova_start)) {
                        ehca_err(mr->device, "bad input values: new_size=%lx "
                                 "iova_start=%p", new_size, iova_start);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit1;
                }
                num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
                                 PAGE_SIZE - 1) / PAGE_SIZE);
                num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
                                 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
                pginfo.type           = EHCA_MR_PGI_PHYS;
                pginfo.num_pages      = num_pages_mr;
                pginfo.num_4k         = num_pages_4k;
                pginfo.num_phys_buf   = num_phys_buf;
                pginfo.phys_buf_array = phys_buf_array;
                pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
                                         EHCA_PAGESIZE);
        }
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                new_acl = mr_access_flags;
        if (mr_rereg_mask & IB_MR_REREG_PD)
                new_pd = container_of(pd, struct ehca_pd, ib_pd);

        ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
                            new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
        if (ret)
                goto rereg_phys_mr_exit1;

        /* successful reregistration */
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mr->pd = pd;
        mr->lkey = tmp_lkey;
        mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
        spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
                         "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
                         "iova_start=%p",
                         ret, mr, mr_rereg_mask, pd, phys_buf_array,
                         num_phys_buf, mr_access_flags, iova_start);
        return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

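/*
 * Query the MR attributes from firmware (hipz_h_query_mr) and translate
 * them into the ib_mr_attr format expected by the IB core.
 */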
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;
        unsigned long sl_flags;
        struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            (my_pd->ownpid != cur_pid)) {
                ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                ret = -EINVAL;
                goto query_mr_exit0;
        }

        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
                ret = -EINVAL;
                goto query_mr_exit0;
        }

        memset(mr_attr, 0, sizeof(struct ib_mr_attr));
        spin_lock_irqsave(&e_mr->mrlock, sl_flags);

        h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
                         "hca_hndl=%lx mr_hndl=%lx lkey=%x",
                         h_ret, mr, shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle, mr->lkey);
                ret = ehca_mrmw_map_hrc_query_mr(h_ret);
                goto query_mr_exit1;
        }
        mr_attr->pd               = mr->pd;
        mr_attr->device_virt_addr = hipzout.vaddr;
        mr_attr->size             = hipzout.len;
        mr_attr->lkey             = hipzout.lkey;
        mr_attr->rkey             = hipzout.rkey;
        ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
        spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
                         ret, mr, mr_attr);
        return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

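/*
 * Deregister an MR: free the firmware resource and the driver object.
 * FMRs and the internal max-MR are rejected here, since they have their
 * own teardown paths.
 */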
int ehca_dereg_mr(struct ib_mr *mr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            (my_pd->ownpid != cur_pid)) {
                ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        }

        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        } else if (e_mr == shca->maxmr) {
                /* should be impossible, however reject to be sure */
                ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
                         "shca->maxmr=%p mr->lkey=%x",
                         mr, shca->maxmr, mr->lkey);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        }

        /* TODO: BUSY: MR still has bound window(s) */
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
        if (h_ret != H_SUCCESS) {
                ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
                         "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
                         h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle, mr->lkey);
                ret = ehca_mrmw_map_hrc_free_mr(h_ret);
                goto dereg_mr_exit0;
        }

        /* successful deregistration */
        ehca_mr_delete(e_mr);

dereg_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
        return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

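/*
 * Allocate a memory window: create the driver object, allocate the
 * firmware MW resource and return the rkey assigned by the hypervisor.
 */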
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *ib_mw;
        u64 h_ret;
        struct ehca_mw *e_mw;
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_mw_hipzout_parms hipzout = {{0},0};

        e_mw = ehca_mw_new();
        if (!e_mw) {
                ib_mw = ERR_PTR(-ENOMEM);
                goto alloc_mw_exit0;
        }

        h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
                                         e_pd->fw_pd, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
                         "shca=%p hca_hndl=%lx mw=%p",
                         h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
                ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
                goto alloc_mw_exit1;
        }
        /* successful MW allocation */
        e_mw->ipz_mw_handle = hipzout.handle;
        e_mw->ib_mw.rkey    = hipzout.rkey;
        return &e_mw->ib_mw;

alloc_mw_exit1:
        ehca_mw_delete(e_mw);
alloc_mw_exit0:
        if (IS_ERR(ib_mw))
                ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
        return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
                 struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind)
{
        /* TODO: not yet supported */
        ehca_gen_err("bind MW currently not supported by HCAD");

        return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mw->device, struct ehca_shca, ib_device);
        struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

        h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
        if (h_ret != H_SUCCESS) {
                ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
                         "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
                         h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
                         e_mw->ipz_mw_handle.handle);
                return ehca_mrmw_map_hrc_free_mw(h_ret);
        }
        /* successful deallocation */
        ehca_mw_delete(e_mw);
        return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

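/*
 * Allocate a fast memory region (FMR): after validating access flags and
 * attributes, an MR of the maximum mappable size is registered without
 * any pages; pages are supplied later via ehca_map_phys_fmr().
 */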
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
                              int mr_access_flags,
                              struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *ib_fmr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_mr *e_fmr;
        int ret;
        u32 tmp_lkey, tmp_rkey;
        struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

        /* check other parameters */
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if (mr_access_flags & IB_ACCESS_MW_BIND) {
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
                ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
                         "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
                         fmr_attr->max_pages, fmr_attr->max_maps,
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
            ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
                ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }

        e_fmr = ehca_mr_new();
        if (!e_fmr) {
                ib_fmr = ERR_PTR(-ENOMEM);
                goto alloc_fmr_exit0;
        }
        e_fmr->flags |= EHCA_MR_FLAG_FMR;

        /* register MR on HCA */
        ret = ehca_reg_mr(shca, e_fmr, NULL,
                          fmr_attr->max_pages * (1 << fmr_attr->page_shift),
                          mr_access_flags, e_pd, &pginfo,
                          &tmp_lkey, &tmp_rkey);
        if (ret) {
                ib_fmr = ERR_PTR(ret);
                goto alloc_fmr_exit1;
        }

        /* successful */
        e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
        e_fmr->fmr_max_pages = fmr_attr->max_pages;
        e_fmr->fmr_max_maps = fmr_attr->max_maps;
        e_fmr->fmr_map_cnt = 0;
        return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
        ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
        if (IS_ERR(ib_fmr))
                ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
                         "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
                         mr_access_flags, fmr_attr);
        return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

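/*
 * Map a list of physical pages into an FMR.  The mapping is implemented
 * as a reregistration (ehca_rereg_mr) of the underlying MR with the new
 * page list, yielding fresh lkey/rkey values.
 */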
int ehca_map_phys_fmr(struct ib_fmr *fmr,
                      u64 *page_list,
                      int list_len,
                      u64 iova)
{
        int ret;
        struct ehca_shca *shca =
                container_of(fmr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
        struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
        struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
        u32 tmp_lkey, tmp_rkey;

        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
                         e_fmr, e_fmr->flags);
                ret = -EINVAL;
                goto map_phys_fmr_exit0;
        }
        ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
        if (ret)
                goto map_phys_fmr_exit0;
        if (iova % e_fmr->fmr_page_size) {
                /* only whole-numbered pages */
                ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
                         iova, e_fmr->fmr_page_size);
                ret = -EINVAL;
                goto map_phys_fmr_exit0;
        }
        if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
                /* HCAD does not limit the maps, however trace this anyway */
                ehca_info(fmr->device, "map limit exceeded, fmr=%p "
                          "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
                          fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
        }

        pginfo.type      = EHCA_MR_PGI_FMR;
        pginfo.num_pages = list_len;
        pginfo.num_4k    = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
        pginfo.page_list = page_list;
        pginfo.next_4k   = ((iova & (e_fmr->fmr_page_size-1)) /
                            EHCA_PAGESIZE);

        ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
                            list_len * e_fmr->fmr_page_size,
                            e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
        if (ret)
                goto map_phys_fmr_exit0;

        /* successful reregistration */
        e_fmr->fmr_map_cnt++;
        e_fmr->ib.ib_fmr.lkey = tmp_lkey;
        e_fmr->ib.ib_fmr.rkey = tmp_rkey;
        return 0;

map_phys_fmr_exit0:
        if (ret)
                ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
                         "iova=%lx",
                         ret, fmr, page_list, list_len, iova);
        return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/

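/*
 * Unmap a list of FMRs.  All FMRs on the list must belong to the same
 * SHCA; each one is then unmapped via ehca_unmap_one_fmr(), stopping at
 * the first failure.
 */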
int ehca_unmap_fmr(struct list_head *fmr_list)
{
        int ret = 0;
        struct ib_fmr *ib_fmr;
        struct ehca_shca *shca = NULL;
        struct ehca_shca *prev_shca;
        struct ehca_mr *e_fmr;
        u32 num_fmr = 0;
        u32 unmap_fmr_cnt = 0;

        /* check all FMRs belong to the same SHCA, and check internal flag */
        list_for_each_entry(ib_fmr, fmr_list, list) {
                prev_shca = shca;
                if (!ib_fmr) {
                        ehca_gen_err("bad fmr=%p in list", ib_fmr);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                shca = container_of(ib_fmr->device, struct ehca_shca,
                                    ib_device);
                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
                if ((shca != prev_shca) && prev_shca) {
                        ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
                                 "prev_shca=%p e_fmr=%p",
                                 shca, prev_shca, e_fmr);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                        ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
                                 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                num_fmr++;
        }

        /* loop over all FMRs to unmap */
        list_for_each_entry(ib_fmr, fmr_list, list) {
                unmap_fmr_cnt++;
                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
                shca = container_of(ib_fmr->device, struct ehca_shca,
                                    ib_device);
                ret = ehca_unmap_one_fmr(shca, e_fmr);
                if (ret) {
                        /* unmap failed, stop unmapping of rest of FMRs */
                        ehca_err(&shca->ib_device, "unmap of one FMR failed, "
                                 "stop rest, e_fmr=%p num_fmr=%x "
                                 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
                                 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
                        goto unmap_fmr_exit0;
                }
        }

unmap_fmr_exit0:
        if (ret)
                ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
                             ret, fmr_list, num_fmr, unmap_fmr_cnt);
        return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

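/*
 * Free an FMR: release the firmware MR resource and delete the driver
 * object.
 */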
int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
        int ret;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(fmr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
                         e_fmr, e_fmr->flags);
                ret = -EINVAL;
                goto free_fmr_exit0;
        }

        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
        if (h_ret != H_SUCCESS) {
                ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
                         "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle, fmr->lkey);
                ret = ehca_mrmw_map_hrc_free_mr(h_ret);
                goto free_fmr_exit0;
        }
        /* successful deregistration */
        ehca_mr_delete(e_fmr);
        return 0;

free_fmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
        return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

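/*
 * Common MR registration helper: allocates the firmware MR resource,
 * registers all pages described by pginfo and saves the resulting keys
 * and attributes in the ehca_mr structure.
 */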
int ehca_reg_mr(struct ehca_shca *shca,
                struct ehca_mr *e_mr,
                u64 *iova_start,
                u64 size,
                int acl,
                struct ehca_pd *e_pd,
                struct ehca_mr_pginfo *pginfo,
                u32 *lkey, /*OUT*/
                u32 *rkey) /*OUT*/
{
        int ret;
        u64 h_ret;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
        if (ehca_use_hp_mr == 1)
                hipz_acl |= 0x00000001;

        h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
                                         (u64)iova_start, size, hipz_acl,
                                         e_pd->fw_pd, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
                         "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
                ret = ehca_mrmw_map_hrc_alloc(h_ret);
                goto ehca_reg_mr_exit0;
        }

        e_mr->ipz_mr_handle = hipzout.handle;

        ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
        if (ret)
                goto ehca_reg_mr_exit1;

        /* successful registration */
        e_mr->num_pages = pginfo->num_pages;
        e_mr->num_4k    = pginfo->num_4k;
        e_mr->start     = iova_start;
        e_mr->size      = size;
        e_mr->acl       = acl;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;

ehca_reg_mr_exit1:
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
                         "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
                         "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
                         h_ret, shca, e_mr, iova_start, size, acl, e_pd,
                         hipzout.lkey, pginfo, pginfo->num_pages,
                         pginfo->num_4k, ret);
                ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
                         "not recoverable");
        }
ehca_reg_mr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
                         "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
                         "num_pages=%lx num_4k=%lx",
                         ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
                         pginfo->num_pages, pginfo->num_4k);
        return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

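/*
 * Register the MR pages with firmware in chunks of up to 512 4K pages
 * per hCall, using a firmware control block as transfer buffer.  The
 * last chunk must complete with H_SUCCESS, all earlier ones with
 * H_PAGE_REGISTERED.
 */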
int ehca_reg_mr_rpages(struct ehca_shca *shca,
                       struct ehca_mr *e_mr,
                       struct ehca_mr_pginfo *pginfo)
{
        int ret = 0;
        u64 h_ret;
        u32 rnum;
        u64 rpage;
        u32 i;
        u64 *kpage;

        kpage = ehca_alloc_fw_ctrlblock();
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
                goto ehca_reg_mr_rpages_exit0;
        }

        /* max 512 pages per shot */
        for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {

                if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
                        rnum = pginfo->num_4k % 512; /* last shot */
                        if (rnum == 0)
                                rnum = 512;      /* last shot is full */
                } else
                        rnum = 512;

                if (rnum > 1) {
                        ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
                        if (ret) {
                                ehca_err(&shca->ib_device, "ehca_set_pagebuf "
                                         "bad rc, ret=%x rnum=%x kpage=%p",
                                         ret, rnum, kpage);
                                ret = -EFAULT;
                                goto ehca_reg_mr_rpages_exit1;
                        }
                        rpage = virt_to_abs(kpage);
                        if (!rpage) {
                                ehca_err(&shca->ib_device, "kpage=%p i=%x",
                                         kpage, i);
                                ret = -EFAULT;
                                goto ehca_reg_mr_rpages_exit1;
                        }
                } else {  /* rnum==1 */
                        ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
                        if (ret) {
                                ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
                                         "bad rc, ret=%x i=%x", ret, i);
                                ret = -EFAULT;
                                goto ehca_reg_mr_rpages_exit1;
                        }
                }

                h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
                                                 0, /* pagesize 4k */
                                                 0, rpage, rnum);

                if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
                        /*
                         * check for 'registration complete'==H_SUCCESS
                         * and for 'page registered'==H_PAGE_REGISTERED
                         */
                        if (h_ret != H_SUCCESS) {
                                ehca_err(&shca->ib_device, "last "
                                         "hipz_reg_rpage_mr failed, h_ret=%lx "
                                         "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
                                         " lkey=%x", h_ret, e_mr, i,
                                         shca->ipz_hca_handle.handle,
                                         e_mr->ipz_mr_handle.handle,
                                         e_mr->ib.ib_mr.lkey);
                                ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
                                break;
                        } else
                                ret = 0;
                } else if (h_ret != H_PAGE_REGISTERED) {
                        ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
                                 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
                                 "mr_hndl=%lx", h_ret, e_mr, i,
                                 e_mr->ib.ib_mr.lkey,
                                 shca->ipz_hca_handle.handle,
                                 e_mr->ipz_mr_handle.handle);
                        ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
                        break;
                } else
                        ret = 0;
        } /* end for(i) */

ehca_reg_mr_rpages_exit1:
        ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
                         "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
                         pginfo->num_pages, pginfo->num_4k);
        return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/

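/*
 * Fast-path reregistration using a single hipz_h_reregister_pmr hCall.
 * Returns -EAGAIN if firmware rejects the request (e.g. H_MR_CONDITION),
 * in which case the caller falls back to the 3-hCall sequence.
 */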
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
                                struct ehca_mr *e_mr,
                                u64 *iova_start,
                                u64 size,
                                u32 acl,
                                struct ehca_pd *e_pd,
                                struct ehca_mr_pginfo *pginfo,
                                u32 *lkey, /*OUT*/
                                u32 *rkey) /*OUT*/
{
        int ret;
        u64 h_ret;
        u32 hipz_acl;
        u64 *kpage;
        u64 rpage;
        struct ehca_mr_pginfo pginfo_save;
        struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

        kpage = ehca_alloc_fw_ctrlblock();
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
                goto ehca_rereg_mr_rereg1_exit0;
        }

        pginfo_save = *pginfo;
        ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
        if (ret) {
                ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
                         "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
                         e_mr, pginfo, pginfo->type, pginfo->num_pages,
                         pginfo->num_4k, kpage);
                goto ehca_rereg_mr_rereg1_exit1;
        }
        rpage = virt_to_abs(kpage);
        if (!rpage) {
                ehca_err(&shca->ib_device, "kpage=%p", kpage);
                ret = -EFAULT;
                goto ehca_rereg_mr_rereg1_exit1;
        }
        h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
                                      (u64)iova_start, size, hipz_acl,
                                      e_pd->fw_pd, rpage, &hipzout);
        if (h_ret != H_SUCCESS) {
                /*
                 * reregistration unsuccessful, try it again with the 3 hCalls,
                 * e.g. this is required in case H_MR_CONDITION
                 * (MW bound or MR is shared)
                 */
                ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
                          "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
                *pginfo = pginfo_save;
                ret = -EAGAIN;
        } else if ((u64 *)hipzout.vaddr != iova_start) {
                ehca_err(&shca->ib_device, "PHYP changed iova_start in "
                         "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
                         "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
                         hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
                         e_mr->ib.ib_mr.lkey, hipzout.lkey);
                ret = -EFAULT;
        } else {
                /*
                 * successful reregistration
                 * note: start and start_out are identical for eServer HCAs
                 */
                e_mr->num_pages = pginfo->num_pages;
                e_mr->num_4k    = pginfo->num_4k;
                e_mr->start     = iova_start;
                e_mr->size      = size;
                e_mr->acl       = acl;
                *lkey = hipzout.lkey;
                *rkey = hipzout.rkey;
        }

ehca_rereg_mr_rereg1_exit1:
        ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
        if (ret && (ret != -EAGAIN))
                ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
                         "pginfo=%p num_pages=%lx num_4k=%lx",
                         ret, *lkey, *rkey, pginfo, pginfo->num_pages,
                         pginfo->num_4k);
        return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

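/*
 * Reregister an MR: use the single reregister hCall where possible;
 * otherwise (too many pages, max-MR, or a rereg1 failure) free the MR
 * and register it again from scratch, preserving struct ib_mr and the
 * lock.
 */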
1196 int ehca_rereg_mr(struct ehca_shca *shca,
1197                   struct ehca_mr *e_mr,
1198                   u64 *iova_start,
1199                   u64 size,
1200                   int acl,
1201                   struct ehca_pd *e_pd,
1202                   struct ehca_mr_pginfo *pginfo,
1203                   u32 *lkey,
1204                   u32 *rkey)
1205 {
1206         int ret = 0;
1207         u64 h_ret;
1208         int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1209         int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
1210
1211         /* first determine reregistration hCall(s) */
1212         if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
1213             (pginfo->num_4k > e_mr->num_4k)) {
1214                 ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
1215                          "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
1216                 rereg_1_hcall = 0;
1217                 rereg_3_hcall = 1;
1218         }
1219
1220         if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
1221                 rereg_1_hcall = 0;
1222                 rereg_3_hcall = 1;
1223                 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
1224                 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
1225                          e_mr);
1226         }
1227
1228         if (rereg_1_hcall) {
1229                 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
1230                                            acl, e_pd, pginfo, lkey, rkey);
1231                 if (ret) {
1232                         if (ret == -EAGAIN)
1233                                 rereg_3_hcall = 1;
1234                         else
1235                                 goto ehca_rereg_mr_exit0;
1236                 }
1237         }
1238
1239         if (rereg_3_hcall) {
1240                 struct ehca_mr save_mr;
1241
1242                 /* first deregister old MR */
1243                 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1244                 if (h_ret != H_SUCCESS) {
1245                         ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1246                                  "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
1247                                  "mr->lkey=%x",
1248                                  h_ret, e_mr, shca->ipz_hca_handle.handle,
1249                                  e_mr->ipz_mr_handle.handle,
1250                                  e_mr->ib.ib_mr.lkey);
1251                         ret = ehca_mrmw_map_hrc_free_mr(h_ret);
1252                         goto ehca_rereg_mr_exit0;
1253                 }
1254                 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1255                 save_mr = *e_mr;
1256                 ehca_mr_deletenew(e_mr);
1257
1258                 /* set some MR values */
1259                 e_mr->flags = save_mr.flags;
1260                 e_mr->fmr_page_size = save_mr.fmr_page_size;
1261                 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1262                 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1263                 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1264
1265                 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1266                                       e_pd, pginfo, lkey, rkey);
1267                 if (ret) {
1268                         u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1269                         memcpy(&e_mr->flags, &(save_mr.flags),
1270                                sizeof(struct ehca_mr) - offset);
1271                         goto ehca_rereg_mr_exit0;
1272                 }
1273         }
1274
1275 ehca_rereg_mr_exit0:
1276         if (ret)
1277                 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1278                          "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1279                          "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
1280                          "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1281                          acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
1282                          rereg_1_hcall, rereg_3_hcall);
1283         return ret;
1284 } /* end ehca_rereg_mr() */
1285
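/*
 * Illustrative note (not part of the driver): the firmware rereg hCall
 * handles at most 512 4k pages at once, which is why ehca_rereg_mr()
 * above falls back to the deregister/reregister sequence ("Rereg3")
 * for larger MRs.  A hypothetical caller-side sketch:
 *
 *	u32 lkey, rkey;
 *	ret = ehca_rereg_mr(shca, e_mr, iova_start, new_size, acl,
 *			    e_pd, &pginfo, &lkey, &rkey);
 *	if (ret)
 *		... treat the MR as invalid, it may be deregistered ...
 */
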
1286 /*----------------------------------------------------------------------*/
1287
1288 int ehca_unmap_one_fmr(struct ehca_shca *shca,
1289                        struct ehca_mr *e_fmr)
1290 {
1291         int ret = 0;
1292         u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1294         int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
1295         struct ehca_pd *e_pd =
1296                 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1297         struct ehca_mr save_fmr;
1298         u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo = {0};
	struct ehca_mr_hipzout_parms hipzout = {{0}};
1301
1302         /* first check if reregistration hCall can be used for unmap */
1303         if (e_fmr->fmr_max_pages > 512) {
1304                 rereg_1_hcall = 0;
1305                 rereg_3_hcall = 1;
1306         }
1307
1308         if (rereg_1_hcall) {
1309                 /*
1310                  * note: after using rereg hcall with len=0,
1311                  * rereg hcall must be used again for registering pages
1312                  */
1313                 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1314                                               0, 0, e_pd->fw_pd, 0, &hipzout);
1315                 if (h_ret != H_SUCCESS) {
1316                         /*
1317                          * should not happen, because length checked above,
1318                          * FMRs are not shared and no MW bound to FMRs
1319                          */
1320                         ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1321                                  "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
1322                                  "mr_hndl=%lx lkey=%x lkey_out=%x",
1323                                  h_ret, e_fmr, shca->ipz_hca_handle.handle,
1324                                  e_fmr->ipz_mr_handle.handle,
1325                                  e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1326                         rereg_3_hcall = 1;
1327                 } else {
1328                         /* successful reregistration */
1329                         e_fmr->start = NULL;
1330                         e_fmr->size = 0;
1331                         tmp_lkey = hipzout.lkey;
1332                         tmp_rkey = hipzout.rkey;
1333                 }
1334         }
1335
1336         if (rereg_3_hcall) {
1339                 /* first free old FMR */
1340                 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1341                 if (h_ret != H_SUCCESS) {
1342                         ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1343                                  "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
1344                                  "lkey=%x",
1345                                  h_ret, e_fmr, shca->ipz_hca_handle.handle,
1346                                  e_fmr->ipz_mr_handle.handle,
1347                                  e_fmr->ib.ib_fmr.lkey);
1348                         ret = ehca_mrmw_map_hrc_free_mr(h_ret);
1349                         goto ehca_unmap_one_fmr_exit0;
1350                 }
1351                 /* clean ehca_mr_t, without changing lock */
1352                 save_fmr = *e_fmr;
1353                 ehca_mr_deletenew(e_fmr);
1354
1355                 /* set some MR values */
1356                 e_fmr->flags = save_fmr.flags;
1357                 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1358                 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1359                 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1360                 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1361                 e_fmr->acl = save_fmr.acl;
1362
1363                 pginfo.type      = EHCA_MR_PGI_FMR;
1364                 pginfo.num_pages = 0;
1365                 pginfo.num_4k    = 0;
1366                 ret = ehca_reg_mr(shca, e_fmr, NULL,
1367                                   (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1368                                   e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1369                                   &tmp_rkey);
1370                 if (ret) {
			u32 offset = offsetof(struct ehca_mr, flags);
			memcpy(&e_fmr->flags, &save_fmr.flags,
			       sizeof(struct ehca_mr) - offset);
1374                         goto ehca_unmap_one_fmr_exit0;
1375                 }
1376         }
1377
1378 ehca_unmap_one_fmr_exit0:
1379         if (ret)
1380                 ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
1381                          "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
1382                          ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
1383                          rereg_1_hcall, rereg_3_hcall);
1384         return ret;
1385 } /* end ehca_unmap_one_fmr() */
1386
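/*
 * Illustrative note (not part of the driver): the "Rereg1" path above
 * relies on the firmware semantics that a rereg hCall with length 0
 * detaches all pages from the FMR in one go, i.e.
 *
 *	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
 *				      0, 0, e_pd->fw_pd, 0, &hipzout);
 *
 * after which a further rereg hCall is required to attach new pages,
 * as the comment inside the function notes.
 */
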
1387 /*----------------------------------------------------------------------*/
1388
1389 int ehca_reg_smr(struct ehca_shca *shca,
1390                  struct ehca_mr *e_origmr,
1391                  struct ehca_mr *e_newmr,
1392                  u64 *iova_start,
1393                  int acl,
1394                  struct ehca_pd *e_pd,
1395                  u32 *lkey, /*OUT*/
1396                  u32 *rkey) /*OUT*/
1397 {
1398         int ret = 0;
1399         u64 h_ret;
1400         u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0}};
1402
1403         ehca_mrmw_map_acl(acl, &hipz_acl);
1404         ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1405
1406         h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1407                                     (u64)iova_start, hipz_acl, e_pd->fw_pd,
1408                                     &hipzout);
1409         if (h_ret != H_SUCCESS) {
1410                 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1411                          "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1412                          "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1413                          h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1414                          shca->ipz_hca_handle.handle,
1415                          e_origmr->ipz_mr_handle.handle,
1416                          e_origmr->ib.ib_mr.lkey);
1417                 ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
1418                 goto ehca_reg_smr_exit0;
1419         }
1420         /* successful registration */
1421         e_newmr->num_pages     = e_origmr->num_pages;
1422         e_newmr->num_4k        = e_origmr->num_4k;
1423         e_newmr->start         = iova_start;
1424         e_newmr->size          = e_origmr->size;
1425         e_newmr->acl           = acl;
1426         e_newmr->ipz_mr_handle = hipzout.handle;
1427         *lkey = hipzout.lkey;
1428         *rkey = hipzout.rkey;
1429         return 0;
1430
1431 ehca_reg_smr_exit0:
1432         if (ret)
1433                 ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
1434                          "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1435                          ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1436         return ret;
1437 } /* end ehca_reg_smr() */
1438
1439 /*----------------------------------------------------------------------*/
1440
1441 /* register internal max-MR to internal SHCA */
1442 int ehca_reg_internal_maxmr(
1443         struct ehca_shca *shca,
1444         struct ehca_pd *e_pd,
1445         struct ehca_mr **e_maxmr)  /*OUT*/
1446 {
1447         int ret;
1448         struct ehca_mr *e_mr;
1449         u64 *iova_start;
1450         u64 size_maxmr;
	struct ehca_mr_pginfo pginfo = {0};
1452         struct ib_phys_buf ib_pbuf;
1453         u32 num_pages_mr;
	u32 num_pages_4k; /* number of 4k HCA pages covering the region */
1455
1456         e_mr = ehca_mr_new();
1457         if (!e_mr) {
1458                 ehca_err(&shca->ib_device, "out of memory");
1459                 ret = -ENOMEM;
1460                 goto ehca_reg_internal_maxmr_exit0;
1461         }
1462         e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1463
1464         /* register internal max-MR on HCA */
1465         size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
1467         ib_pbuf.addr = 0;
1468         ib_pbuf.size = size_maxmr;
1469         num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
1470                          PAGE_SIZE - 1) / PAGE_SIZE);
1471         num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
1472                          EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
1473
1474         pginfo.type           = EHCA_MR_PGI_PHYS;
1475         pginfo.num_pages      = num_pages_mr;
1476         pginfo.num_4k         = num_pages_4k;
1477         pginfo.num_phys_buf   = 1;
1478         pginfo.phys_buf_array = &ib_pbuf;
1479
1480         ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1481                           &pginfo, &e_mr->ib.ib_mr.lkey,
1482                           &e_mr->ib.ib_mr.rkey);
1483         if (ret) {
1484                 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1485                          "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
1486                          "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
1487                          num_pages_mr, num_pages_4k);
1488                 goto ehca_reg_internal_maxmr_exit1;
1489         }
1490
1491         /* successful registration of all pages */
1492         e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1493         e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1494         e_mr->ib.ib_mr.uobject = NULL;
1495         atomic_inc(&(e_pd->ib_pd.usecnt));
1496         atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
1497         *e_maxmr = e_mr;
1498         return 0;
1499
1500 ehca_reg_internal_maxmr_exit1:
1501         ehca_mr_delete(e_mr);
1502 ehca_reg_internal_maxmr_exit0:
1503         if (ret)
1504                 ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
1505                          ret, shca, e_pd, e_maxmr);
1506         return ret;
1507 } /* end ehca_reg_internal_maxmr() */
1508
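/*
 * Worked example for the page arithmetic above (hypothetical numbers):
 * with PAGE_SIZE = 64K, EHCA_PAGESIZE = 4K, a page aligned iova_start
 * and size_maxmr = 1 GB (0x40000000):
 *
 *	num_pages_mr = (0 + 0x40000000 + 0x10000 - 1) / 0x10000 = 16384
 *	num_pages_4k = (0 + 0x40000000 + 0x1000 - 1) / 0x1000 = 262144
 *
 * i.e. one entry per 4k HCA page, 16 of them per 64K kernel page.
 */
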
1509 /*----------------------------------------------------------------------*/
1510
1511 int ehca_reg_maxmr(struct ehca_shca *shca,
1512                    struct ehca_mr *e_newmr,
1513                    u64 *iova_start,
1514                    int acl,
1515                    struct ehca_pd *e_pd,
1516                    u32 *lkey,
1517                    u32 *rkey)
1518 {
1519         u64 h_ret;
1520         struct ehca_mr *e_origmr = shca->maxmr;
1521         u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0}};
1523
1524         ehca_mrmw_map_acl(acl, &hipz_acl);
1525         ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1526
1527         h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1528                                     (u64)iova_start, hipz_acl, e_pd->fw_pd,
1529                                     &hipzout);
1530         if (h_ret != H_SUCCESS) {
1531                 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1532                          "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1533                          h_ret, e_origmr, shca->ipz_hca_handle.handle,
1534                          e_origmr->ipz_mr_handle.handle,
1535                          e_origmr->ib.ib_mr.lkey);
1536                 return ehca_mrmw_map_hrc_reg_smr(h_ret);
1537         }
1538         /* successful registration */
1539         e_newmr->num_pages     = e_origmr->num_pages;
1540         e_newmr->num_4k        = e_origmr->num_4k;
1541         e_newmr->start         = iova_start;
1542         e_newmr->size          = e_origmr->size;
1543         e_newmr->acl           = acl;
1544         e_newmr->ipz_mr_handle = hipzout.handle;
1545         *lkey = hipzout.lkey;
1546         *rkey = hipzout.rkey;
1547         return 0;
1548 } /* end ehca_reg_maxmr() */
1549
1550 /*----------------------------------------------------------------------*/
1551
1552 int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1553 {
1554         int ret;
1555         struct ehca_mr *e_maxmr;
1556         struct ib_pd *ib_pd;
1557
1558         if (!shca->maxmr) {
1559                 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1560                 ret = -EINVAL;
1561                 goto ehca_dereg_internal_maxmr_exit0;
1562         }
1563
1564         e_maxmr = shca->maxmr;
1565         ib_pd = e_maxmr->ib.ib_mr.pd;
1566         shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1567
1568         ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1569         if (ret) {
1570                 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1571                          "ret=%x e_maxmr=%p shca=%p lkey=%x",
1572                          ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1573                 shca->maxmr = e_maxmr;
1574                 goto ehca_dereg_internal_maxmr_exit0;
1575         }
1576
1577         atomic_dec(&ib_pd->usecnt);
1578
1579 ehca_dereg_internal_maxmr_exit0:
1580         if (ret)
1581                 ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
1582                          ret, shca, shca->maxmr);
1583         return ret;
1584 } /* end ehca_dereg_internal_maxmr() */
1585
1586 /*----------------------------------------------------------------------*/
1587
/*
 * check physical buffer array of MR verbs for validity and
 * calculate the MR size
 */
1592 int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1593                                   int num_phys_buf,
1594                                   u64 *iova_start,
1595                                   u64 *size)
1596 {
1597         struct ib_phys_buf *pbuf = phys_buf_array;
1598         u64 size_count = 0;
1599         u32 i;
1600
1601         if (num_phys_buf == 0) {
1602                 ehca_gen_err("bad phys buf array len, num_phys_buf=0");
1603                 return -EINVAL;
1604         }
1605         /* check first buffer */
1606         if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1607                 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1608                              "pbuf->addr=%lx pbuf->size=%lx",
1609                              iova_start, pbuf->addr, pbuf->size);
1610                 return -EINVAL;
1611         }
1612         if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1613             (num_phys_buf > 1)) {
1614                 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
1615                              "pbuf->size=%lx", pbuf->addr, pbuf->size);
1616                 return -EINVAL;
1617         }
1618
1619         for (i = 0; i < num_phys_buf; i++) {
1620                 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1621                         ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
1622                                      "pbuf->size=%lx",
1623                                      i, pbuf->addr, pbuf->size);
1624                         return -EINVAL;
1625                 }
1626                 if (((i > 0) && /* not 1st */
1627                      (i < (num_phys_buf - 1)) &&        /* not last */
1628                      (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1629                         ehca_gen_err("bad size, i=%x pbuf->size=%lx",
1630                                      i, pbuf->size);
1631                         return -EINVAL;
1632                 }
1633                 size_count += pbuf->size;
1634                 pbuf++;
1635         }
1636
1637         *size = size_count;
1638         return 0;
1639 } /* end ehca_mr_chk_buf_and_calc_size() */
1640
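/*
 * Illustrative example (made-up values, 64K kernel pages): the checks
 * above accept a phys_buf_array in which only the first buffer may
 * start mid-page and only the last may end mid-page:
 *
 *	buf[0]: addr = 0x10000800, size = 0xf800   (ends page aligned)
 *	buf[1]: addr = 0x20000000, size = 0x10000  (aligned both ends)
 *	buf[2]: addr = 0x30000000, size = 0x123    (may end anywhere)
 *
 * with iova_start carrying the same in-page offset 0x800 as buf[0];
 * *size is then set to 0xf800 + 0x10000 + 0x123.
 */
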
1641 /*----------------------------------------------------------------------*/
1642
/* check page list of the map FMR verb for validity */
1644 int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1645                              u64 *page_list,
1646                              int list_len)
1647 {
1648         u32 i;
1649         u64 *page;
1650
1651         if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1652                 ehca_gen_err("bad list_len, list_len=%x "
1653                              "e_fmr->fmr_max_pages=%x fmr=%p",
1654                              list_len, e_fmr->fmr_max_pages, e_fmr);
1655                 return -EINVAL;
1656         }
1657
1658         /* each page must be aligned */
1659         page = page_list;
1660         for (i = 0; i < list_len; i++) {
1661                 if (*page % e_fmr->fmr_page_size) {
1662                         ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1663                                      "fmr_page_size=%x", i, *page, page, e_fmr,
1664                                      e_fmr->fmr_page_size);
1665                         return -EINVAL;
1666                 }
1667                 page++;
1668         }
1669
1670         return 0;
1671 } /* end ehca_fmr_check_page_list() */
1672
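/*
 * Usage sketch (illustrative, hypothetical values): with
 * fmr_page_size = 4096 every page list entry must be 4k aligned:
 *
 *	u64 pages[2] = { 0x10000000, 0x10001000 };
 *
 *	if (ehca_fmr_check_page_list(e_fmr, pages, 2))
 *		... reject the map FMR request ...
 */
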
1673 /*----------------------------------------------------------------------*/
1674
/* set up page buffer from page info */
1676 int ehca_set_pagebuf(struct ehca_mr *e_mr,
1677                      struct ehca_mr_pginfo *pginfo,
1678                      u32 number,
1679                      u64 *kpage)
1680 {
1681         int ret = 0;
1682         struct ib_umem_chunk *prev_chunk;
1683         struct ib_umem_chunk *chunk;
1684         struct ib_phys_buf *pbuf;
1685         u64 *fmrlist;
1686         u64 num4k, pgaddr, offs4k;
1687         u32 i = 0;
1688         u32 j = 0;
1689
1690         if (pginfo->type == EHCA_MR_PGI_PHYS) {
1691                 /* loop over desired phys_buf_array entries */
1692                 while (i < number) {
1693                         pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
1694                         num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
1695                                   EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1696                         offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1697                         while (pginfo->next_4k < offs4k + num4k) {
1698                                 /* sanity check */
1699                                 if ((pginfo->page_cnt >= pginfo->num_pages) ||
1700                                     (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1701                                         ehca_gen_err("page_cnt >= num_pages, "
1702                                                      "page_cnt=%lx "
1703                                                      "num_pages=%lx "
1704                                                      "page_4k_cnt=%lx "
1705                                                      "num_4k=%lx i=%x",
1706                                                      pginfo->page_cnt,
1707                                                      pginfo->num_pages,
1708                                                      pginfo->page_4k_cnt,
1709                                                      pginfo->num_4k, i);
1710                                         ret = -EFAULT;
1711                                         goto ehca_set_pagebuf_exit0;
1712                                 }
1713                                 *kpage = phys_to_abs(
1714                                         (pbuf->addr & EHCA_PAGEMASK)
1715                                         + (pginfo->next_4k * EHCA_PAGESIZE));
				if (!(*kpage) && pbuf->addr) {
1717                                         ehca_gen_err("pbuf->addr=%lx "
1718                                                      "pbuf->size=%lx "
1719                                                      "next_4k=%lx", pbuf->addr,
1720                                                      pbuf->size,
1721                                                      pginfo->next_4k);
1722                                         ret = -EFAULT;
1723                                         goto ehca_set_pagebuf_exit0;
1724                                 }
1725                                 (pginfo->page_4k_cnt)++;
1726                                 (pginfo->next_4k)++;
1727                                 if (pginfo->next_4k %
1728                                     (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1729                                         (pginfo->page_cnt)++;
1730                                 kpage++;
1731                                 i++;
				if (i >= number)
					break;
1733                         }
1734                         if (pginfo->next_4k >= offs4k + num4k) {
1735                                 (pginfo->next_buf)++;
1736                                 pginfo->next_4k = 0;
1737                         }
1738                 }
1739         } else if (pginfo->type == EHCA_MR_PGI_USER) {
1740                 /* loop over desired chunk entries */
1741                 chunk      = pginfo->next_chunk;
1742                 prev_chunk = pginfo->next_chunk;
1743                 list_for_each_entry_continue(chunk,
1744                                              (&(pginfo->region->chunk_list)),
1745                                              list) {
1746                         for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = page_to_pfn(chunk->page_list[i].page)
					<< PAGE_SHIFT;
1749                                 *kpage = phys_to_abs(pgaddr +
1750                                                      (pginfo->next_4k *
1751                                                       EHCA_PAGESIZE));
				if (!(*kpage)) {
1753                                         ehca_gen_err("pgaddr=%lx "
1754                                                      "chunk->page_list[i]=%lx "
1755                                                      "i=%x next_4k=%lx mr=%p",
1756                                                      pgaddr,
1757                                                      (u64)sg_dma_address(
1758                                                              &chunk->
1759                                                              page_list[i]),
1760                                                      i, pginfo->next_4k, e_mr);
1761                                         ret = -EFAULT;
1762                                         goto ehca_set_pagebuf_exit0;
1763                                 }
1764                                 (pginfo->page_4k_cnt)++;
1765                                 (pginfo->next_4k)++;
1766                                 kpage++;
1767                                 if (pginfo->next_4k %
1768                                     (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1769                                         (pginfo->page_cnt)++;
1770                                         (pginfo->next_nmap)++;
1771                                         pginfo->next_4k = 0;
1772                                         i++;
1773                                 }
1774                                 j++;
				if (j >= number)
					break;
1776                         }
1777                         if ((pginfo->next_nmap >= chunk->nmap) &&
1778                             (j >= number)) {
1779                                 pginfo->next_nmap = 0;
1780                                 prev_chunk = chunk;
1781                                 break;
1782                         } else if (pginfo->next_nmap >= chunk->nmap) {
1783                                 pginfo->next_nmap = 0;
1784                                 prev_chunk = chunk;
1785                         } else if (j >= number)
1786                                 break;
1787                         else
1788                                 prev_chunk = chunk;
1789                 }
1790                 pginfo->next_chunk =
1791                         list_prepare_entry(prev_chunk,
1792                                            (&(pginfo->region->chunk_list)),
1793                                            list);
1794         } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1795                 /* loop over desired page_list entries */
1796                 fmrlist = pginfo->page_list + pginfo->next_listelem;
1797                 for (i = 0; i < number; i++) {
1798                         *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1799                                              pginfo->next_4k * EHCA_PAGESIZE);
			if (!(*kpage)) {
1801                                 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1802                                              "next_listelem=%lx next_4k=%lx",
1803                                              *fmrlist, fmrlist,
1804                                              pginfo->next_listelem,
1805                                              pginfo->next_4k);
1806                                 ret = -EFAULT;
1807                                 goto ehca_set_pagebuf_exit0;
1808                         }
1809                         (pginfo->page_4k_cnt)++;
1810                         (pginfo->next_4k)++;
1811                         kpage++;
1812                         if (pginfo->next_4k %
1813                             (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1814                                 (pginfo->page_cnt)++;
1815                                 (pginfo->next_listelem)++;
1816                                 fmrlist++;
1817                                 pginfo->next_4k = 0;
1818                         }
1819                 }
1820         } else {
1821                 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1822                 ret = -EFAULT;
1823                 goto ehca_set_pagebuf_exit0;
1824         }
1825
1826 ehca_set_pagebuf_exit0:
1827         if (ret)
1828                 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
1829                              "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
1830                              "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
1831                              "next_listelem=%lx region=%p next_chunk=%p "
1832                              "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
1833                              pginfo->num_pages, pginfo->num_4k,
1834                              pginfo->next_buf, pginfo->next_4k, number, kpage,
1835                              pginfo->page_cnt, pginfo->page_4k_cnt, i,
1836                              pginfo->next_listelem, pginfo->region,
1837                              pginfo->next_chunk, pginfo->next_nmap);
1838         return ret;
1839 } /* end ehca_set_pagebuf() */
1840
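/*
 * Worked example for the 4k bookkeeping above (hypothetical numbers):
 * with 64K kernel pages there are PAGE_SIZE / EHCA_PAGESIZE = 16 HCA
 * pages per kernel page, so page_cnt advances only on every 16th
 * increment of next_4k; a call with number = 32 thus emits 32 4k page
 * addresses into kpage[] and advances page_cnt by 2.
 */
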
1841 /*----------------------------------------------------------------------*/
1842
/* set up one page from the page info page buffer */
1844 int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
1845                        struct ehca_mr_pginfo *pginfo,
1846                        u64 *rpage)
1847 {
1848         int ret = 0;
1849         struct ib_phys_buf *tmp_pbuf;
1850         u64 *fmrlist;
1851         struct ib_umem_chunk *chunk;
1852         struct ib_umem_chunk *prev_chunk;
1853         u64 pgaddr, num4k, offs4k;
1854
1855         if (pginfo->type == EHCA_MR_PGI_PHYS) {
1856                 /* sanity check */
1857                 if ((pginfo->page_cnt >= pginfo->num_pages) ||
1858                     (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1859                         ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
1860                                      "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
1861                                      pginfo->page_cnt, pginfo->num_pages,
1862                                      pginfo->page_4k_cnt, pginfo->num_4k);
1863                         ret = -EFAULT;
1864                         goto ehca_set_pagebuf_1_exit0;
1865                 }
1866                 tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
1867                 num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
1868                           EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1869                 offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1870                 *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
1871                                      (pginfo->next_4k * EHCA_PAGESIZE));
		if (!(*rpage) && tmp_pbuf->addr) {
1873                         ehca_gen_err("tmp_pbuf->addr=%lx"
1874                                      " tmp_pbuf->size=%lx next_4k=%lx",
1875                                      tmp_pbuf->addr, tmp_pbuf->size,
1876                                      pginfo->next_4k);
1877                         ret = -EFAULT;
1878                         goto ehca_set_pagebuf_1_exit0;
1879                 }
1880                 (pginfo->page_4k_cnt)++;
1881                 (pginfo->next_4k)++;
1882                 if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1883                         (pginfo->page_cnt)++;
1884                 if (pginfo->next_4k >= offs4k + num4k) {
1885                         (pginfo->next_buf)++;
1886                         pginfo->next_4k = 0;
1887                 }
1888         } else if (pginfo->type == EHCA_MR_PGI_USER) {
1889                 chunk      = pginfo->next_chunk;
1890                 prev_chunk = pginfo->next_chunk;
1891                 list_for_each_entry_continue(chunk,
1892                                              (&(pginfo->region->chunk_list)),
1893                                              list) {
			pgaddr = page_to_pfn(chunk->page_list[
					pginfo->next_nmap].page) << PAGE_SHIFT;
1897                         *rpage = phys_to_abs(pgaddr +
1898                                              (pginfo->next_4k * EHCA_PAGESIZE));
			if (!(*rpage)) {
1900                                 ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
1901                                              " next_nmap=%lx next_4k=%lx mr=%p",
1902                                              pgaddr, (u64)sg_dma_address(
1903                                                      &chunk->page_list[
1904                                                              pginfo->
1905                                                              next_nmap]),
1906                                              pginfo->next_nmap, pginfo->next_4k,
1907                                              e_mr);
1908                                 ret = -EFAULT;
1909                                 goto ehca_set_pagebuf_1_exit0;
1910                         }
1911                         (pginfo->page_4k_cnt)++;
1912                         (pginfo->next_4k)++;
1913                         if (pginfo->next_4k %
1914                             (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1915                                 (pginfo->page_cnt)++;
1916                                 (pginfo->next_nmap)++;
1917                                 pginfo->next_4k = 0;
1918                         }
1919                         if (pginfo->next_nmap >= chunk->nmap) {
1920                                 pginfo->next_nmap = 0;
1921                                 prev_chunk = chunk;
1922                         }
1923                         break;
1924                 }
1925                 pginfo->next_chunk =
1926                         list_prepare_entry(prev_chunk,
1927                                            (&(pginfo->region->chunk_list)),
1928                                            list);
1929         } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1930                 fmrlist = pginfo->page_list + pginfo->next_listelem;
1931                 *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1932                                      pginfo->next_4k * EHCA_PAGESIZE);
		if (!(*rpage)) {
1934                         ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1935                                      "next_listelem=%lx next_4k=%lx",
1936                                      *fmrlist, fmrlist, pginfo->next_listelem,
1937                                      pginfo->next_4k);
1938                         ret = -EFAULT;
1939                         goto ehca_set_pagebuf_1_exit0;
1940                 }
1941                 (pginfo->page_4k_cnt)++;
1942                 (pginfo->next_4k)++;
1943                 if (pginfo->next_4k %
1944                     (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1945                         (pginfo->page_cnt)++;
1946                         (pginfo->next_listelem)++;
1947                         pginfo->next_4k = 0;
1948                 }
1949         } else {
1950                 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1951                 ret = -EFAULT;
1952                 goto ehca_set_pagebuf_1_exit0;
1953         }
1954
1955 ehca_set_pagebuf_1_exit0:
1956         if (ret)
1957                 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
1958                              "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
1959                              "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
1960                              "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
1961                              pginfo, pginfo->type, pginfo->num_pages,
1962                              pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
1963                              rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
1964                              pginfo->next_listelem, pginfo->region,
1965                              pginfo->next_chunk, pginfo->next_nmap);
1966         return ret;
1967 } /* end ehca_set_pagebuf_1() */
1968
1969 /*----------------------------------------------------------------------*/
1970
/*
 * check whether an MR spans the whole of memory, i.e. is a max-MR;
 * returns 1 if it is a max-MR, else 0
 */
1975 int ehca_mr_is_maxmr(u64 size,
1976                      u64 *iova_start)
1977 {
	/* an MR is treated as a max-MR only if it matches the following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
1981                 ehca_gen_dbg("this is a max-MR");
1982                 return 1;
1983         } else
1984                 return 0;
1985 } /* end ehca_mr_is_maxmr() */
1986
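/*
 * Usage sketch (illustrative): callers can use this predicate to route
 * a registration to the shared max-MR path, e.g.
 *
 *	if (ehca_mr_is_maxmr(size, iova_start))
 *		ret = ehca_reg_maxmr(shca, e_mr, iova_start, acl,
 *				     e_pd, &lkey, &rkey);
 *	else
 *		ret = ehca_reg_mr(...);
 */
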
1987 /*----------------------------------------------------------------------*/
1988
/* map access control from IB to HIPZ format; used for both MR and MW */
1990 void ehca_mrmw_map_acl(int ib_acl,
1991                        u32 *hipz_acl)
1992 {
1993         *hipz_acl = 0;
1994         if (ib_acl & IB_ACCESS_REMOTE_READ)
1995                 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1996         if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1997                 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1998         if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1999                 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
2000         if (ib_acl & IB_ACCESS_LOCAL_WRITE)
2001                 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
2002         if (ib_acl & IB_ACCESS_MW_BIND)
2003                 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
2004 } /* end ehca_mrmw_map_acl() */
2005
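/*
 * Example mapping (values illustrative): for
 * ib_acl = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ this routine
 * yields hipz_acl = HIPZ_ACCESSCTRL_L_WRITE | HIPZ_ACCESSCTRL_R_READ;
 * ehca_mrmw_reverse_map_acl() below inverts the mapping.
 */
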
2006 /*----------------------------------------------------------------------*/
2007
2008 /* sets page size in hipz access control for MR/MW. */
2009 void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
2010 {
	/* nothing to do: this HCA supports only 4k pages */
2012 } /* end ehca_mrmw_set_pgsize_hipz_acl() */
2013
2014 /*----------------------------------------------------------------------*/
2015
/*
 * reverse map access control from HIPZ to IB format;
 * used for both MR and MW
 */
2020 void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
2021                                int *ib_acl) /*OUT*/
2022 {
2023         *ib_acl = 0;
2024         if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
2025                 *ib_acl |= IB_ACCESS_REMOTE_READ;
2026         if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
2027                 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
2028         if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
2029                 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
2030         if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
2031                 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
2032         if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
2033                 *ib_acl |= IB_ACCESS_MW_BIND;
2034 } /* end ehca_mrmw_reverse_map_acl() */
2035
2037 /*----------------------------------------------------------------------*/
2038
2039 /*
2040  * map HIPZ rc to IB retcodes for MR/MW allocations
2041  * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
2042  */
2043 int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
2044 {
2045         switch (hipz_rc) {
2046         case H_SUCCESS:              /* successful completion */
2047                 return 0;
2048         case H_ADAPTER_PARM:         /* invalid adapter handle */
2049         case H_RT_PARM:              /* invalid resource type */
2050         case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
2051         case H_MLENGTH_PARM:         /* invalid memory length */
2052         case H_MEM_ACCESS_PARM:      /* invalid access controls */
2053         case H_CONSTRAINED:          /* resource constraint */
2054                 return -EINVAL;
2055         case H_BUSY:                 /* long busy */
2056                 return -EBUSY;
2057         default:
2058                 return -EINVAL;
2059         }
2060 } /* end ehca_mrmw_map_hrc_alloc() */
2061
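/*
 * Typical call pattern (sketch; hipz_h_alloc_resource_mr() is assumed
 * to be the allocation hCall wrapper this mapper is paired with):
 *
 *	h_ret = hipz_h_alloc_resource_mr(...);
 *	if (h_ret != H_SUCCESS)
 *		return ehca_mrmw_map_hrc_alloc(h_ret);
 */
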
2062 /*----------------------------------------------------------------------*/
2063
2064 /*
2065  * map HIPZ rc to IB retcodes for MR register rpage
2066  * Used for hipz_h_register_rpage_mr at registering last page
2067  */
2068 int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
2069 {
2070         switch (hipz_rc) {
2071         case H_SUCCESS:         /* registration complete */
2072                 return 0;
2073         case H_PAGE_REGISTERED: /* page registered */
2074         case H_ADAPTER_PARM:    /* invalid adapter handle */
2075         case H_RH_PARM:         /* invalid resource handle */
2076 /*      case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater than 512
				 */
2081         case H_TABLE_FULL:      /* page table full */
2082         case H_HARDWARE:        /* HCA not operational */
2083                 return -EINVAL;
2084         case H_BUSY:            /* long busy */
2085                 return -EBUSY;
2086         default:
2087                 return -EINVAL;
2088         }
2089 } /* end ehca_mrmw_map_hrc_rrpg_last() */
2090
2091 /*----------------------------------------------------------------------*/
2092
2093 /*
2094  * map HIPZ rc to IB retcodes for MR register rpage
2095  * Used for hipz_h_register_rpage_mr at registering one page, but not last page
2096  */
2097 int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
2098 {
2099         switch (hipz_rc) {
2100         case H_PAGE_REGISTERED: /* page registered */
2101                 return 0;
2102         case H_SUCCESS:         /* registration complete */
2103         case H_ADAPTER_PARM:    /* invalid adapter handle */
2104         case H_RH_PARM:         /* invalid resource handle */
2105 /*      case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater than 512
				 */
2110         case H_TABLE_FULL:      /* page table full */
2111         case H_HARDWARE:        /* HCA not operational */
2112                 return -EINVAL;
2113         case H_BUSY:            /* long busy */
2114                 return -EBUSY;
2115         default:
2116                 return -EINVAL;
2117         }
2118 } /* end ehca_mrmw_map_hrc_rrpg_notlast() */
2119
2120 /*----------------------------------------------------------------------*/
2121
2122 /* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
2123 int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
2124 {
2125         switch (hipz_rc) {
2126         case H_SUCCESS:              /* successful completion */
2127                 return 0;
2128         case H_ADAPTER_PARM:         /* invalid adapter handle */
2129         case H_RH_PARM:              /* invalid resource handle */
2130                 return -EINVAL;
2131         case H_BUSY:                 /* long busy */
2132                 return -EBUSY;
2133         default:
2134                 return -EINVAL;
2135         }
2136 } /* end ehca_mrmw_map_hrc_query_mr() */
2137
2138 /*----------------------------------------------------------------------*/
2140
2141 /*
2142  * map HIPZ rc to IB retcodes for freeing MR resource
2143  * Used for hipz_h_free_resource_mr
2144  */
2145 int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
2146 {
2147         switch (hipz_rc) {
2148         case H_SUCCESS:      /* resource freed */
2149                 return 0;
2150         case H_ADAPTER_PARM: /* invalid adapter handle */
2151         case H_RH_PARM:      /* invalid resource handle */
2152         case H_R_STATE:      /* invalid resource state */
2153         case H_HARDWARE:     /* HCA not operational */
2154                 return -EINVAL;
2155         case H_RESOURCE:     /* Resource in use */
2156         case H_BUSY:         /* long busy */
2157                 return -EBUSY;
2158         default:
2159                 return -EINVAL;
2160         }
2161 } /* end ehca_mrmw_map_hrc_free_mr() */
2162
2163 /*----------------------------------------------------------------------*/
2164
2165 /*
2166  * map HIPZ rc to IB retcodes for freeing MW resource
2167  * Used for hipz_h_free_resource_mw
2168  */
2169 int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
2170 {
2171         switch (hipz_rc) {
2172         case H_SUCCESS:      /* resource freed */
2173                 return 0;
2174         case H_ADAPTER_PARM: /* invalid adapter handle */
2175         case H_RH_PARM:      /* invalid resource handle */
2176         case H_R_STATE:      /* invalid resource state */
2177         case H_HARDWARE:     /* HCA not operational */
2178                 return -EINVAL;
2179         case H_RESOURCE:     /* Resource in use */
2180         case H_BUSY:         /* long busy */
2181                 return -EBUSY;
2182         default:
2183                 return -EINVAL;
2184         }
2185 } /* end ehca_mrmw_map_hrc_free_mw() */
2186
2187 /*----------------------------------------------------------------------*/
2188
2189 /*
2190  * map HIPZ rc to IB retcodes for SMR registrations
2191  * Used for hipz_h_register_smr.
2192  */
2193 int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
2194 {
2195         switch (hipz_rc) {
2196         case H_SUCCESS:              /* successful completion */
2197                 return 0;
2198         case H_ADAPTER_PARM:         /* invalid adapter handle */
2199         case H_RH_PARM:              /* invalid resource handle */
2200         case H_MEM_PARM:             /* invalid MR virtual address */
2201         case H_MEM_ACCESS_PARM:      /* invalid access controls */
2202         case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
2203                 return -EINVAL;
2204         case H_BUSY:                 /* long busy */
2205                 return -EBUSY;
2206         default:
2207                 return -EINVAL;
2208         }
2209 } /* end ehca_mrmw_map_hrc_reg_smr() */
2210
2211 /*----------------------------------------------------------------------*/
2212
/*
 * MR destructor and constructor
 * used in the reregister MR verb; sets all fields of ehca_mr_t to 0,
 * except struct ib_mr and the spinlock
 */
2218 void ehca_mr_deletenew(struct ehca_mr *mr)
2219 {
2220         mr->flags         = 0;
2221         mr->num_pages     = 0;
2222         mr->num_4k        = 0;
2223         mr->acl           = 0;
2224         mr->start         = NULL;
2225         mr->fmr_page_size = 0;
2226         mr->fmr_max_pages = 0;
2227         mr->fmr_max_maps  = 0;
2228         mr->fmr_map_cnt   = 0;
2229         memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
2230         memset(&mr->galpas, 0, sizeof(mr->galpas));
2231         mr->nr_of_pages   = 0;
2232         mr->pagearray     = NULL;
2233 } /* end ehca_mr_deletenew() */
2234
2235 int ehca_init_mrmw_cache(void)
2236 {
2237         mr_cache = kmem_cache_create("ehca_cache_mr",
2238                                      sizeof(struct ehca_mr), 0,
2239                                      SLAB_HWCACHE_ALIGN,
2240                                      NULL, NULL);
2241         if (!mr_cache)
2242                 return -ENOMEM;
2243         mw_cache = kmem_cache_create("ehca_cache_mw",
2244                                      sizeof(struct ehca_mw), 0,
2245                                      SLAB_HWCACHE_ALIGN,
2246                                      NULL, NULL);
2247         if (!mw_cache) {
2248                 kmem_cache_destroy(mr_cache);
2249                 mr_cache = NULL;
2250                 return -ENOMEM;
2251         }
2252         return 0;
2253 }
2254
2255 void ehca_cleanup_mrmw_cache(void)
2256 {
2257         if (mr_cache)
2258                 kmem_cache_destroy(mr_cache);
2259         if (mw_cache)
2260                 kmem_cache_destroy(mw_cache);
2261 }