drivers/infiniband/hw/qib/qib_file_ops.c
1 /*
2  * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3  * All rights reserved.
4  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include <linux/pci.h>
36 #include <linux/poll.h>
37 #include <linux/cdev.h>
38 #include <linux/swap.h>
39 #include <linux/vmalloc.h>
40 #include <linux/highmem.h>
41 #include <linux/io.h>
42 #include <linux/uio.h>
43 #include <linux/jiffies.h>
44 #include <asm/pgtable.h>
45 #include <linux/delay.h>
46
47 #include "qib.h"
48 #include "qib_common.h"
49 #include "qib_user_sdma.h"
50
51 static int qib_open(struct inode *, struct file *);
52 static int qib_close(struct inode *, struct file *);
53 static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
54 static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
55                              unsigned long, loff_t);
56 static unsigned int qib_poll(struct file *, struct poll_table_struct *);
57 static int qib_mmapf(struct file *, struct vm_area_struct *);
58
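/*
 * Entry points for the per-context user device files: userspace sends
 * commands with write()/writev() (aio_write), maps chip and shared
 * memory regions with mmap(), and waits for receive events with poll().
 */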
59 static const struct file_operations qib_file_ops = {
60         .owner = THIS_MODULE,
61         .write = qib_write,
62         .aio_write = qib_aio_write,
63         .open = qib_open,
64         .release = qib_close,
65         .poll = qib_poll,
66         .mmap = qib_mmapf
67 };
68
69 /*
70  * Convert kernel virtual addresses to physical addresses so they don't
71  * potentially conflict with the chip addresses used as mmap offsets.
72  * It doesn't really matter what mmap offset we use as long as we can
73  * interpret it correctly.
74  */
75 static u64 cvt_kvaddr(void *p)
76 {
77         struct page *page;
78         u64 paddr = 0;
79
80         page = vmalloc_to_page(p);
81         if (page)
82                 paddr = page_to_pfn(page) << PAGE_SHIFT;
83
84         return paddr;
85 }
86
87 static int qib_get_base_info(struct file *fp, void __user *ubase,
88                              size_t ubase_size)
89 {
90         struct qib_ctxtdata *rcd = ctxt_fp(fp);
91         int ret = 0;
92         struct qib_base_info *kinfo = NULL;
93         struct qib_devdata *dd = rcd->dd;
94         struct qib_pportdata *ppd = rcd->ppd;
95         unsigned subctxt_cnt;
96         int shared, master;
97         size_t sz;
98
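        /*
         * subctxt_cnt == 0 means this context is not shared.  For a
         * shared context, the opener of subcontext 0 is the master and
         * is given the leftover TIDs and PIO buffers below.
         */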
99         subctxt_cnt = rcd->subctxt_cnt;
100         if (!subctxt_cnt) {
101                 shared = 0;
102                 master = 0;
103                 subctxt_cnt = 1;
104         } else {
105                 shared = 1;
106                 master = !subctxt_fp(fp);
107         }
108
109         sz = sizeof(*kinfo);
110         /* If context sharing is not requested, allow the old size structure */
111         if (!shared)
112                 sz -= 7 * sizeof(u64);
113         if (ubase_size < sz) {
114                 ret = -EINVAL;
115                 goto bail;
116         }
117
118         kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
119         if (kinfo == NULL) {
120                 ret = -ENOMEM;
121                 goto bail;
122         }
123
124         ret = dd->f_get_base_info(rcd, kinfo);
125         if (ret < 0)
126                 goto bail;
127
128         kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
129         kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
130         kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
131         kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
132         /*
133          * have to mmap whole thing
134          */
135         kinfo->spi_rcv_egrbuftotlen =
136                 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
137         kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
138         kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
139                 rcd->rcvegrbuf_chunks;
140         kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
141         if (master)
142                 kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
143         /*
144          * For this use, this may be cfgctxts summed over all chips
145          * that are configured and present.
146          */
147         kinfo->spi_nctxts = dd->cfgctxts;
148         /* unit (chip/board) our context is on */
149         kinfo->spi_unit = dd->unit;
150         kinfo->spi_port = ppd->port;
151         /* for now, only a single page */
152         kinfo->spi_tid_maxsize = PAGE_SIZE;
153
154         /*
155          * Doing this per context, and based on the skip value, etc.  This has
156          * to be the actual buffer size, since the protocol code treats it
157          * as an array.
158          *
159          * These have to be set to user addresses in the user code via mmap.
160          * These values are used on return to user code for the mmap target
161          * addresses only.  For 32 bit, same 44 bit address problem, so use
162          * the physical address, not virtual.  Before 2.6.11, using the
163          * page_address() macro worked, but in 2.6.11, even that returns the
164          * full 64 bit address (upper bits all 1's).  So far, using the
165          * physical addresses (or chip offsets, for chip mapping) works, but
166          * no doubt some future kernel release will change that, and we'll be
167          * on to yet another method of dealing with this.
168          * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
169          * since the chips with non-zero rhf_offset don't normally
170          * enable tail register updates to host memory, but for testing,
171          * both can be enabled and used.
172          */
173         kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
174         kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
175         kinfo->spi_rhf_offset = dd->rhf_offset;
176         kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
177         kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
178         /* setup per-unit (not port) status area for user programs */
179         kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
180                 (char *) ppd->statusp -
181                 (char *) dd->pioavailregs_dma;
182         kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
183         if (!shared) {
184                 kinfo->spi_piocnt = rcd->piocnt;
185                 kinfo->spi_piobufbase = (u64) rcd->piobufs;
186                 kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
187         } else if (master) {
188                 kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
189                                     (rcd->piocnt % subctxt_cnt);
190                 /* Master's PIO buffers are after all the slaves' */
191                 kinfo->spi_piobufbase = (u64) rcd->piobufs +
192                         dd->palign *
193                         (rcd->piocnt - kinfo->spi_piocnt);
194         } else {
195                 unsigned slave = subctxt_fp(fp) - 1;
196
197                 kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
198                 kinfo->spi_piobufbase = (u64) rcd->piobufs +
199                         dd->palign * kinfo->spi_piocnt * slave;
200         }
201
202         if (shared) {
203                 kinfo->spi_sendbuf_status =
204                         cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
205                 /* only spi_subctxt_* fields should be set in this block! */
206                 kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
207
208                 kinfo->spi_subctxt_rcvegrbuf =
209                         cvt_kvaddr(rcd->subctxt_rcvegrbuf);
210                 kinfo->spi_subctxt_rcvhdr_base =
211                         cvt_kvaddr(rcd->subctxt_rcvhdr_base);
212         }
213
214         /*
215          * All user buffers are 2KB buffers.  If we ever support
216          * giving 4KB buffers to user processes, this will need some
217          * work.  Can't use piobufbase directly, because it has
218          * both 2K and 4K buffer base values.
219          */
220         kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
221                 dd->palign;
222         kinfo->spi_pioalign = dd->palign;
223         kinfo->spi_qpair = QIB_KD_QP;
224         /*
225          * user mode PIO buffers are always 2KB, even when 4KB can
226          * be received, and sent via the kernel; this is ibmaxlen
227          * for 2K MTU.
228          */
229         kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
230         kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
231         kinfo->spi_ctxt = rcd->ctxt;
232         kinfo->spi_subctxt = subctxt_fp(fp);
233         kinfo->spi_sw_version = QIB_KERN_SWVERSION;
234         kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
235         kinfo->spi_hw_version = dd->revision;
236
237         if (master)
238                 kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
239
240         sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
241         if (copy_to_user(ubase, kinfo, sz))
242                 ret = -EFAULT;
243 bail:
244         kfree(kinfo);
245         return ret;
246 }
247
248 /**
249  * qib_tid_update - update a context TID
250  * @rcd: the context
251  * @fp: the qib device file
252  * @ti: the TID information
253  *
254  * The new implementation as of Oct 2004 is that the driver assigns
255  * the tid and returns it to the caller.   To reduce search time, we
256  * keep a cursor for each context, walking the shadow tid array to find
257  * one that's not in use.
258  *
259  * For now, if we can't allocate the full list, we fail, although
260  * in the long run, we'll allocate as many as we can, and the
261  * caller will deal with that by trying the remaining pages later.
262  * That means that when we fail, we have to mark the tids as not in
263  * use again, in our shadow copy.
264  *
265  * It's up to the caller to free the tids when they are done.
266  * We'll unlock the pages as they free them.
267  *
268  * Also, right now we are locking one page at a time, but since
269  * the intended use of this routine is for a single group of
270  * virtually contiguous pages, that should change to improve
271  * performance.
272  */
273 static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
274                           const struct qib_tid_info *ti)
275 {
276         int ret = 0, ntids;
277         u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
278         u16 *tidlist;
279         struct qib_devdata *dd = rcd->dd;
280         u64 physaddr;
281         unsigned long vaddr;
282         u64 __iomem *tidbase;
283         unsigned long tidmap[8];
284         struct page **pagep = NULL;
285         unsigned subctxt = subctxt_fp(fp);
286
287         if (!dd->pageshadow) {
288                 ret = -ENOMEM;
289                 goto done;
290         }
291
292         cnt = ti->tidcnt;
293         if (!cnt) {
294                 ret = -EFAULT;
295                 goto done;
296         }
297         ctxttid = rcd->ctxt * dd->rcvtidcnt;
298         if (!rcd->subctxt_cnt) {
299                 tidcnt = dd->rcvtidcnt;
300                 tid = rcd->tidcursor;
301                 tidoff = 0;
302         } else if (!subctxt) {
303                 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
304                          (dd->rcvtidcnt % rcd->subctxt_cnt);
305                 tidoff = dd->rcvtidcnt - tidcnt;
306                 ctxttid += tidoff;
307                 tid = tidcursor_fp(fp);
308         } else {
309                 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
310                 tidoff = tidcnt * (subctxt - 1);
311                 ctxttid += tidoff;
312                 tid = tidcursor_fp(fp);
313         }
314         if (cnt > tidcnt) {
315                 /* make sure it all fits in tid_pg_list */
316                 qib_devinfo(dd->pcidev, "Process tried to allocate %u "
317                          "TIDs, only trying max (%u)\n", cnt, tidcnt);
318                 cnt = tidcnt;
319         }
320         pagep = (struct page **) rcd->tid_pg_list;
321         tidlist = (u16 *) &pagep[dd->rcvtidcnt];
322         pagep += tidoff;
323         tidlist += tidoff;
324
325         memset(tidmap, 0, sizeof(tidmap));
326         /* before decrement; chip actual # */
327         ntids = tidcnt;
328         tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
329                                    dd->rcvtidbase +
330                                    ctxttid * sizeof(*tidbase));
331
332         /* virtual address of first page in transfer */
333         vaddr = ti->tidvaddr;
334         if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
335                        cnt * PAGE_SIZE)) {
336                 ret = -EFAULT;
337                 goto done;
338         }
339         ret = qib_get_user_pages(vaddr, cnt, pagep);
340         if (ret) {
341                 /*
342                  * if (ret == -EBUSY)
343                  * We can't continue because the pagep array won't be
344                  * initialized. This should never happen,
345                  * unless perhaps the user has mpin'ed the pages
346                  * themselves.
347                  */
348                 qib_devinfo(dd->pcidev,
349                          "Failed to lock addr %p, %u pages: "
350                          "errno %d\n", (void *) vaddr, cnt, -ret);
351                 goto done;
352         }
353         for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
354                 for (; ntids--; tid++) {
355                         if (tid == tidcnt)
356                                 tid = 0;
357                         if (!dd->pageshadow[ctxttid + tid])
358                                 break;
359                 }
360                 if (ntids < 0) {
361                         /*
362                          * Oops, wrapped all the way through their TIDs,
363                          * and didn't have enough free; see comments at
364                          * start of routine
365                          */
366                         i--;    /* last tidlist[i] not filled in */
367                         ret = -ENOMEM;
368                         break;
369                 }
370                 tidlist[i] = tid + tidoff;
371                 /* we "know" system pages and TID pages are same size */
372                 dd->pageshadow[ctxttid + tid] = pagep[i];
373                 dd->physshadow[ctxttid + tid] =
374                         qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
375                                      PCI_DMA_FROMDEVICE);
376                 /*
377                  * don't need atomic or it's overhead
378                  */
379                 __set_bit(tid, tidmap);
380                 physaddr = dd->physshadow[ctxttid + tid];
381                 /* PERFORMANCE: below should almost certainly be cached */
382                 dd->f_put_tid(dd, &tidbase[tid],
383                                   RCVHQ_RCV_TYPE_EXPECTED, physaddr);
384                 /*
385                  * don't check this tid in qib_ctxtshadow, since we
386                  * just filled it in; start with the next one.
387                  */
388                 tid++;
389         }
390
391         if (ret) {
392                 u32 limit;
393 cleanup:
394                 /* jump here if copy out of updated info failed... */
395                 /* same code that's in qib_free_tid() */
396                 limit = sizeof(tidmap) * BITS_PER_BYTE;
397                 if (limit > tidcnt)
398                         /* just in case size changes in future */
399                         limit = tidcnt;
400                 tid = find_first_bit((const unsigned long *)tidmap, limit);
401                 for (; tid < limit; tid++) {
402                         if (!test_bit(tid, tidmap))
403                                 continue;
404                         if (dd->pageshadow[ctxttid + tid]) {
405                                 dma_addr_t phys;
406
407                                 phys = dd->physshadow[ctxttid + tid];
408                                 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
409                                 /* PERFORMANCE: below should almost certainly
410                                  * be cached
411                                  */
412                                 dd->f_put_tid(dd, &tidbase[tid],
413                                               RCVHQ_RCV_TYPE_EXPECTED,
414                                               dd->tidinvalid);
415                                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
416                                                PCI_DMA_FROMDEVICE);
417                                 dd->pageshadow[ctxttid + tid] = NULL;
418                         }
419                 }
420                 qib_release_user_pages(pagep, cnt);
421         } else {
422                 /*
423                  * Copy the updated array, with the TIDs filled in, back
424                  * to user.  Since we did the copy-in already, this "should
425                  * never fail".  If it does, we have to clean up...
426                  */
427                 if (copy_to_user((void __user *)
428                                  (unsigned long) ti->tidlist,
429                                  tidlist, cnt * sizeof(*tidlist))) {
430                         ret = -EFAULT;
431                         goto cleanup;
432                 }
433                 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
434                                  tidmap, sizeof tidmap)) {
435                         ret = -EFAULT;
436                         goto cleanup;
437                 }
438                 if (tid == tidcnt)
439                         tid = 0;
440                 if (!rcd->subctxt_cnt)
441                         rcd->tidcursor = tid;
442                 else
443                         tidcursor_fp(fp) = tid;
444         }
445
446 done:
447         return ret;
448 }
449
450 /**
451  * qib_tid_free - free a context TID
452  * @rcd: the context
453  * @subctxt: the subcontext
454  * @ti: the TID info
455  *
456  * right now we are unlocking one page at a time, but since
457  * the intended use of this routine is for a single group of
458  * virtually contiguous pages, that should change to improve
459  * performance.  We check that the TID is in range for this context
460  * but otherwise don't check validity; if user has an error and
461  * but otherwise don't check validity; if the user has an error and
462  * frees the wrong tid, it's only their own data that can thereby
463  * be corrupted.  We do check that the TID was in use, for sanity.
464  * they pass in to us.
465  */
466 static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
467                         const struct qib_tid_info *ti)
468 {
469         int ret = 0;
470         u32 tid, ctxttid, cnt, limit, tidcnt;
471         struct qib_devdata *dd = rcd->dd;
472         u64 __iomem *tidbase;
473         unsigned long tidmap[8];
474
475         if (!dd->pageshadow) {
476                 ret = -ENOMEM;
477                 goto done;
478         }
479
480         if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
481                            sizeof tidmap)) {
482                 ret = -EFAULT;
483                 goto done;
484         }
485
486         ctxttid = rcd->ctxt * dd->rcvtidcnt;
487         if (!rcd->subctxt_cnt)
488                 tidcnt = dd->rcvtidcnt;
489         else if (!subctxt) {
490                 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
491                          (dd->rcvtidcnt % rcd->subctxt_cnt);
492                 ctxttid += dd->rcvtidcnt - tidcnt;
493         } else {
494                 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
495                 ctxttid += tidcnt * (subctxt - 1);
496         }
497         tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
498                                    dd->rcvtidbase +
499                                    ctxttid * sizeof(*tidbase));
500
501         limit = sizeof(tidmap) * BITS_PER_BYTE;
502         if (limit > tidcnt)
503                 /* just in case size changes in future */
504                 limit = tidcnt;
505         tid = find_first_bit(tidmap, limit);
506         for (cnt = 0; tid < limit; tid++) {
507                 /*
508                  * small optimization; if we detect a run of 3 or so without
509                  * any set, use find_first_bit again.  That's mainly to
510                  * accelerate the case where we wrapped, so we have some at
511                  * the beginning, and some at the end, and a big gap
512                  * in the middle.
513                  */
514                 if (!test_bit(tid, tidmap))
515                         continue;
516                 cnt++;
517                 if (dd->pageshadow[ctxttid + tid]) {
518                         struct page *p;
519                         dma_addr_t phys;
520
521                         p = dd->pageshadow[ctxttid + tid];
522                         dd->pageshadow[ctxttid + tid] = NULL;
523                         phys = dd->physshadow[ctxttid + tid];
524                         dd->physshadow[ctxttid + tid] = dd->tidinvalid;
525                         /* PERFORMANCE: below should almost certainly be
526                          * cached
527                          */
528                         dd->f_put_tid(dd, &tidbase[tid],
529                                       RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
530                         pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
531                                        PCI_DMA_FROMDEVICE);
532                         qib_release_user_pages(&p, 1);
533                 }
534         }
535 done:
536         return ret;
537 }
538
539 /**
540  * qib_set_part_key - set a partition key
541  * @rcd: the context
542  * @key: the key
543  *
544  * We can have up to 4 active at a time (other than the default, which is
545  * always allowed).  This is somewhat tricky, since multiple contexts may set
546  * the same key, so we reference count them, and clean up at exit.  All 4
547  * partition keys are packed into a single qlogic_ib register.  It's an
548  * error for a process to set the same pkey multiple times.  We provide no
549  * mechanism to de-allocate a pkey at this time, we may eventually need to
550  * do that.  I've used the atomic operations, and no locking, and only make
551  * a single pass through what's available.  This should be more than
552  * adequate for some time. I'll think about spinlocks or the like if and as
553  * it's necessary.
554  */
555 static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
556 {
557         struct qib_pportdata *ppd = rcd->ppd;
558         int i, any = 0, pidx = -1;
559         u16 lkey = key & 0x7FFF;
560         int ret;
561
562         if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
563                 /* nothing to do; this key always valid */
564                 ret = 0;
565                 goto bail;
566         }
567
568         if (!lkey) {
569                 ret = -EINVAL;
570                 goto bail;
571         }
572
573         /*
574          * Set the full membership bit, because it has to be
575          * set in the register or the packet, and it seems
576          * cleaner to set in the register than to force all
577          * callers to set it.
578          */
579         key |= 0x8000;
580
581         for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
582                 if (!rcd->pkeys[i] && pidx == -1)
583                         pidx = i;
584                 if (rcd->pkeys[i] == key) {
585                         ret = -EEXIST;
586                         goto bail;
587                 }
588         }
589         if (pidx == -1) {
590                 ret = -EBUSY;
591                 goto bail;
592         }
593         for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
594                 if (!ppd->pkeys[i]) {
595                         any++;
596                         continue;
597                 }
598                 if (ppd->pkeys[i] == key) {
599                         atomic_t *pkrefs = &ppd->pkeyrefs[i];
600
601                         if (atomic_inc_return(pkrefs) > 1) {
602                                 rcd->pkeys[pidx] = key;
603                                 ret = 0;
604                                 goto bail;
605                         } else {
606                                 /*
607                                  * lost race, decrement count, catch below
608                                  */
609                                 atomic_dec(pkrefs);
610                                 any++;
611                         }
612                 }
613                 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
614                         /*
615                          * It makes no sense to have both the limited and
616                          * full membership PKEY set at the same time since
617                          * the unlimited one will disable the limited one.
618                          */
619                         ret = -EEXIST;
620                         goto bail;
621                 }
622         }
623         if (!any) {
624                 ret = -EBUSY;
625                 goto bail;
626         }
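        /*
         * No usable matching key was found above; claim the first free
         * hardware pkey slot.  atomic_inc_return() == 1 means we won
         * any race with another context for that slot.
         */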
627         for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
628                 if (!ppd->pkeys[i] &&
629                     atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
630                         rcd->pkeys[pidx] = key;
631                         ppd->pkeys[i] = key;
632                         (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
633                         ret = 0;
634                         goto bail;
635                 }
636         }
637         ret = -EBUSY;
638
639 bail:
640         return ret;
641 }
642
643 /**
644  * qib_manage_rcvq - manage a context's receive queue
645  * @rcd: the context
646  * @subctxt: the subcontext
647  * @start_stop: action to carry out
648  *
649  * start_stop == 0 disables receive on the context, for use in queue
650  * overflow conditions.  start_stop == 1 re-enables, to be used to
651  * re-init the software copy of the head register.
652  */
653 static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
654                            int start_stop)
655 {
656         struct qib_devdata *dd = rcd->dd;
657         unsigned int rcvctrl_op;
658
659         if (subctxt)
660                 goto bail;
661         /* atomically clear receive enable ctxt. */
662         if (start_stop) {
663                 /*
664                  * On enable, force in-memory copy of the tail register to
665                  * 0, so that protocol code doesn't have to worry about
666                  * whether or not the chip has yet updated the in-memory
667                  * copy or not on return from the system call. The chip
668          * always resets its tail register back to 0 on a
669                  * transition from disabled to enabled.
670                  */
671                 if (rcd->rcvhdrtail_kvaddr)
672                         qib_clear_rcvhdrtail(rcd);
673                 rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
674         } else
675                 rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
676         dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
677         /* always; new head should be equal to new tail; see above */
678 bail:
679         return 0;
680 }
681
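/*
 * Drop this context's references on the partition keys it had set;
 * clear any hardware pkey whose reference count reaches zero and, if
 * anything changed, push the updated pkey table to the chip.
 */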
682 static void qib_clean_part_key(struct qib_ctxtdata *rcd,
683                                struct qib_devdata *dd)
684 {
685         int i, j, pchanged = 0;
686         u64 oldpkey;
687         struct qib_pportdata *ppd = rcd->ppd;
688
689         /* for debugging only */
690         oldpkey = (u64) ppd->pkeys[0] |
691                 ((u64) ppd->pkeys[1] << 16) |
692                 ((u64) ppd->pkeys[2] << 32) |
693                 ((u64) ppd->pkeys[3] << 48);
694
695         for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
696                 if (!rcd->pkeys[i])
697                         continue;
698                 for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
699                         /* check for match independent of the global bit */
700                         if ((ppd->pkeys[j] & 0x7fff) !=
701                             (rcd->pkeys[i] & 0x7fff))
702                                 continue;
703                         if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
704                                 ppd->pkeys[j] = 0;
705                                 pchanged++;
706                         }
707                         break;
708                 }
709                 rcd->pkeys[i] = 0;
710         }
711         if (pchanged)
712                 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
713 }
714
715 /* common code for the mappings on dma_alloc_coherent mem */
716 static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
717                         unsigned len, void *kvaddr, u32 write_ok, char *what)
718 {
719         struct qib_devdata *dd = rcd->dd;
720         unsigned long pfn;
721         int ret;
722
723         if ((vma->vm_end - vma->vm_start) > len) {
724                 qib_devinfo(dd->pcidev,
725                          "FAIL on %s: len %lx > %x\n", what,
726                          vma->vm_end - vma->vm_start, len);
727                 ret = -EFAULT;
728                 goto bail;
729         }
730
731         /*
732          * Shared context user code requires rcvhdrq mapped r/w; others
733          * are only allowed a readonly mapping.
734          */
735         if (!write_ok) {
736                 if (vma->vm_flags & VM_WRITE) {
737                         qib_devinfo(dd->pcidev,
738                                  "%s must be mapped readonly\n", what);
739                         ret = -EPERM;
740                         goto bail;
741                 }
742
743                 /* don't allow them to later change with mprotect */
744                 vma->vm_flags &= ~VM_MAYWRITE;
745         }
746
747         pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
748         ret = remap_pfn_range(vma, vma->vm_start, pfn,
749                               len, vma->vm_page_prot);
750         if (ret)
751                 qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
752                          "bytes failed: %d\n", what, rcd->ctxt,
753                          pfn, len, ret);
754 bail:
755         return ret;
756 }
757
758 static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
759                      u64 ureg)
760 {
761         unsigned long phys;
762         unsigned long sz;
763         int ret;
764
765         /*
766          * This is real hardware, so use io_remap.  This is the mechanism
767          * for the user process to update the head registers for their ctxt
768          * in the chip.
769          */
770         sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
771         if ((vma->vm_end - vma->vm_start) > sz) {
772                 qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
773                          "%lx > PAGE\n", vma->vm_end - vma->vm_start);
774                 ret = -EFAULT;
775         } else {
776                 phys = dd->physaddr + ureg;
777                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
778
779                 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
780                 ret = io_remap_pfn_range(vma, vma->vm_start,
781                                          phys >> PAGE_SHIFT,
782                                          vma->vm_end - vma->vm_start,
783                                          vma->vm_page_prot);
784         }
785         return ret;
786 }
787
788 static int mmap_piobufs(struct vm_area_struct *vma,
789                         struct qib_devdata *dd,
790                         struct qib_ctxtdata *rcd,
791                         unsigned piobufs, unsigned piocnt)
792 {
793         unsigned long phys;
794         int ret;
795
796         /*
797          * When we map the PIO buffers in the chip, we want to map them as
798          * writeonly, no read possible; unfortunately, x86 doesn't allow
799          * for this in hardware, but we still prevent users from asking
800          * for it.
801          */
802         if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
803                 qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
804                          "reqlen %lx > PAGE\n",
805                          vma->vm_end - vma->vm_start);
806                 ret = -EINVAL;
807                 goto bail;
808         }
809
810         phys = dd->physaddr + piobufs;
811
812 #if defined(__powerpc__)
813         /* There isn't a generic way to specify writethrough mappings */
814         pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
815         pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
816         pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
817 #endif
818
819         /*
820          * don't allow them to later change to readable with mprotect (for when
821          * not initially mapped readable, as is normally the case)
822          */
823         vma->vm_flags &= ~VM_MAYREAD;
824         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
825
826         if (qib_wc_pat)
827                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
828
829         ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
830                                  vma->vm_end - vma->vm_start,
831                                  vma->vm_page_prot);
832 bail:
833         return ret;
834 }
835
836 static int mmap_rcvegrbufs(struct vm_area_struct *vma,
837                            struct qib_ctxtdata *rcd)
838 {
839         struct qib_devdata *dd = rcd->dd;
840         unsigned long start, size;
841         size_t total_size, i;
842         unsigned long pfn;
843         int ret;
844
845         size = rcd->rcvegrbuf_size;
846         total_size = rcd->rcvegrbuf_chunks * size;
847         if ((vma->vm_end - vma->vm_start) > total_size) {
848                 qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
849                          "reqlen %lx > actual %lx\n",
850                          vma->vm_end - vma->vm_start,
851                          (unsigned long) total_size);
852                 ret = -EINVAL;
853                 goto bail;
854         }
855
856         if (vma->vm_flags & VM_WRITE) {
857                 qib_devinfo(dd->pcidev, "Can't map eager buffers as "
858                          "writable (flags=%lx)\n", vma->vm_flags);
859                 ret = -EPERM;
860                 goto bail;
861         }
862         /* don't allow them to later change to writeable with mprotect */
863         vma->vm_flags &= ~VM_MAYWRITE;
864
865         start = vma->vm_start;
866
867         for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
868                 pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
869                 ret = remap_pfn_range(vma, start, pfn, size,
870                                       vma->vm_page_prot);
871                 if (ret < 0)
872                         goto bail;
873         }
874         ret = 0;
875
876 bail:
877         return ret;
878 }
879
880 /*
881  * qib_file_vma_fault - handle a VMA page fault.
882  */
883 static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
884 {
885         struct page *page;
886
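        /*
         * mmap_kvaddr() set vm_pgoff to the kernel virtual address of
         * the shared buffer (in pages), so the faulting page offset
         * translates directly back to a vmalloc address.
         */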
887         page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
888         if (!page)
889                 return VM_FAULT_SIGBUS;
890
891         get_page(page);
892         vmf->page = page;
893
894         return 0;
895 }
896
897 static struct vm_operations_struct qib_file_vm_ops = {
898         .fault = qib_file_vma_fault,
899 };
900
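/*
 * Map one of the vmalloc'ed shared-context buffers into user space.
 * Returns 1 if pgaddr matched one of them, 0 if it matched none, or a
 * negative errno on error.
 */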
901 static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
902                        struct qib_ctxtdata *rcd, unsigned subctxt)
903 {
904         struct qib_devdata *dd = rcd->dd;
905         unsigned subctxt_cnt;
906         unsigned long len;
907         void *addr;
908         size_t size;
909         int ret = 0;
910
911         subctxt_cnt = rcd->subctxt_cnt;
912         size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
913
914         /*
915          * Each process has all the subctxt uregbase, rcvhdrq, and
916          * rcvegrbufs mmapped - as an array for all the processes,
917          * and also separately for this process.
918          */
919         if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
920                 addr = rcd->subctxt_uregbase;
921                 size = PAGE_SIZE * subctxt_cnt;
922         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
923                 addr = rcd->subctxt_rcvhdr_base;
924                 size = rcd->rcvhdrq_size * subctxt_cnt;
925         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
926                 addr = rcd->subctxt_rcvegrbuf;
927                 size *= subctxt_cnt;
928         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
929                                         PAGE_SIZE * subctxt)) {
930                 addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
931                 size = PAGE_SIZE;
932         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
933                                         rcd->rcvhdrq_size * subctxt)) {
934                 addr = rcd->subctxt_rcvhdr_base +
935                         rcd->rcvhdrq_size * subctxt;
936                 size = rcd->rcvhdrq_size;
937         } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
938                 addr = rcd->user_event_mask;
939                 size = PAGE_SIZE;
940         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
941                                         size * subctxt)) {
942                 addr = rcd->subctxt_rcvegrbuf + size * subctxt;
943                 /* rcvegrbufs are read-only on the slave */
944                 if (vma->vm_flags & VM_WRITE) {
945                         qib_devinfo(dd->pcidev,
946                                  "Can't map eager buffers as "
947                                  "writable (flags=%lx)\n", vma->vm_flags);
948                         ret = -EPERM;
949                         goto bail;
950                 }
951                 /*
952                  * Don't allow permission to later change to writeable
953                  * with mprotect.
954                  */
955                 vma->vm_flags &= ~VM_MAYWRITE;
956         } else
957                 goto bail;
958         len = vma->vm_end - vma->vm_start;
959         if (len > size) {
960                 ret = -EINVAL;
961                 goto bail;
962         }
963
964         vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
965         vma->vm_ops = &qib_file_vm_ops;
966         vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
967         ret = 1;
968
969 bail:
970         return ret;
971 }
972
973 /**
974  * qib_mmapf - mmap various structures into user space
975  * @fp: the file pointer
976  * @vma: the VM area
977  *
978  * We use this to have a shared buffer between the kernel and the user code
979  * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
980  * buffers in the chip.  We have the open and close entries so we can bump
981  * the ref count and keep the driver from being unloaded while still mapped.
982  */
983 static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
984 {
985         struct qib_ctxtdata *rcd;
986         struct qib_devdata *dd;
987         u64 pgaddr, ureg;
988         unsigned piobufs, piocnt;
989         int ret, match = 1;
990
991         rcd = ctxt_fp(fp);
992         if (!rcd || !(vma->vm_flags & VM_SHARED)) {
993                 ret = -EINVAL;
994                 goto bail;
995         }
996         dd = rcd->dd;
997
998         /*
999          * This is the qib_do_user_init() code, mapping the shared buffers
1000          * and per-context user registers into the user process. The address
1001          * referred to by vm_pgoff is the file offset passed via mmap().
1002          * For shared contexts, this is the kernel vmalloc() address of the
1003          * pages to share with the master.
1004          * For non-shared or master ctxts, this is a physical address.
1005          * We only do one mmap for each space mapped.
1006          */
1007         pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1008
1009         /*
1010          * Check for 0 in case one of the allocations failed, but user
1011          * called mmap anyway.
1012          */
1013         if (!pgaddr)  {
1014                 ret = -EINVAL;
1015                 goto bail;
1016         }
1017
1018         /*
1019          * Physical addresses must fit in 40 bits for our hardware.
1020          * Check for kernel virtual addresses first, anything else must
1021          * match a HW or memory address.
1022          */
1023         ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1024         if (ret) {
1025                 if (ret > 0)
1026                         ret = 0;
1027                 goto bail;
1028         }
1029
1030         ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1031         if (!rcd->subctxt_cnt) {
1032                 /* ctxt is not shared */
1033                 piocnt = rcd->piocnt;
1034                 piobufs = rcd->piobufs;
1035         } else if (!subctxt_fp(fp)) {
1036                 /* caller is the master */
1037                 piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1038                          (rcd->piocnt % rcd->subctxt_cnt);
1039                 piobufs = rcd->piobufs +
1040                         dd->palign * (rcd->piocnt - piocnt);
1041         } else {
1042                 unsigned slave = subctxt_fp(fp) - 1;
1043
1044                 /* caller is a slave */
1045                 piocnt = rcd->piocnt / rcd->subctxt_cnt;
1046                 piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1047         }
1048
1049         if (pgaddr == ureg)
1050                 ret = mmap_ureg(vma, dd, ureg);
1051         else if (pgaddr == piobufs)
1052                 ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1053         else if (pgaddr == dd->pioavailregs_phys)
1054                 /* in-memory copy of pioavail registers */
1055                 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1056                                    (void *) dd->pioavailregs_dma, 0,
1057                                    "pioavail registers");
1058         else if (pgaddr == rcd->rcvegr_phys)
1059                 ret = mmap_rcvegrbufs(vma, rcd);
1060         else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1061                 /*
1062                  * The rcvhdrq itself; multiple pages, contiguous
1063                  * from an i/o perspective.  Shared contexts need
1064                  * to map r/w, so we allow writing.
1065                  */
1066                 ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1067                                    rcd->rcvhdrq, 1, "rcvhdrq");
1068         else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1069                 /* in-memory copy of rcvhdrq tail register */
1070                 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1071                                    rcd->rcvhdrtail_kvaddr, 0,
1072                                    "rcvhdrq tail");
1073         else
1074                 match = 0;
1075         if (!match)
1076                 ret = -EINVAL;
1077
1078         vma->vm_private_data = NULL;
1079
1080         if (ret < 0)
1081                 qib_devinfo(dd->pcidev,
1082                          "mmap Failure %d: off %llx len %lx\n",
1083                          -ret, (unsigned long long)pgaddr,
1084                          vma->vm_end - vma->vm_start);
1085 bail:
1086         return ret;
1087 }
1088
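/*
 * Poll for an urgent packet: report POLLIN if rcd->urgent has advanced
 * past rcd->urgent_poll since the last poll; otherwise set
 * QIB_CTXT_WAITING_URG and report nothing ready yet.
 */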
1089 static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1090                                     struct file *fp,
1091                                     struct poll_table_struct *pt)
1092 {
1093         struct qib_devdata *dd = rcd->dd;
1094         unsigned pollflag;
1095
1096         poll_wait(fp, &rcd->wait, pt);
1097
1098         spin_lock_irq(&dd->uctxt_lock);
1099         if (rcd->urgent != rcd->urgent_poll) {
1100                 pollflag = POLLIN | POLLRDNORM;
1101                 rcd->urgent_poll = rcd->urgent;
1102         } else {
1103                 pollflag = 0;
1104                 set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1105         }
1106         spin_unlock_irq(&dd->uctxt_lock);
1107
1108         return pollflag;
1109 }
1110
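/*
 * Report POLLIN as soon as the receive header queue is non-empty; if it
 * is currently empty, enable the receive-available interrupt, note that
 * we are waiting (QIB_CTXT_WAITING_RCV), and report nothing ready yet.
 */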
1111 static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1112                                   struct file *fp,
1113                                   struct poll_table_struct *pt)
1114 {
1115         struct qib_devdata *dd = rcd->dd;
1116         unsigned pollflag;
1117
1118         poll_wait(fp, &rcd->wait, pt);
1119
1120         spin_lock_irq(&dd->uctxt_lock);
1121         if (dd->f_hdrqempty(rcd)) {
1122                 set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1123                 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1124                 pollflag = 0;
1125         } else
1126                 pollflag = POLLIN | POLLRDNORM;
1127         spin_unlock_irq(&dd->uctxt_lock);
1128
1129         return pollflag;
1130 }
1131
1132 static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1133 {
1134         struct qib_ctxtdata *rcd;
1135         unsigned pollflag;
1136
1137         rcd = ctxt_fp(fp);
1138         if (!rcd)
1139                 pollflag = POLLERR;
1140         else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1141                 pollflag = qib_poll_urgent(rcd, fp, pt);
1142         else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1143                 pollflag = qib_poll_next(rcd, fp, pt);
1144         else /* invalid */
1145                 pollflag = POLLERR;
1146
1147         return pollflag;
1148 }
1149
1150 /*
1151  * Check that userland and driver are compatible for subcontexts.
1152  */
1153 static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1154 {
1155         /* this code is written long-hand for clarity */
1156         if (QIB_USER_SWMAJOR != user_swmajor) {
1157                 /* no promise of compatibility if major mismatch */
1158                 return 0;
1159         }
1160         if (QIB_USER_SWMAJOR == 1) {
1161                 switch (QIB_USER_SWMINOR) {
1162                 case 0:
1163                 case 1:
1164                 case 2:
1165                         /* no subctxt implementation so cannot be compatible */
1166                         return 0;
1167                 case 3:
1168                         /* 3 is only compatible with itself */
1169                         return user_swminor == 3;
1170                 default:
1171                         /* >= 4 are compatible (or are expected to be) */
1172                         return user_swminor >= 4;
1173                 }
1174         }
1175         /* make no promises yet for future major versions */
1176         return 0;
1177 }
1178
1179 static int init_subctxts(struct qib_devdata *dd,
1180                          struct qib_ctxtdata *rcd,
1181                          const struct qib_user_info *uinfo)
1182 {
1183         int ret = 0;
1184         unsigned num_subctxts;
1185         size_t size;
1186
1187         /*
1188          * If the user is requesting zero subctxts,
1189          * skip the subctxt allocation.
1190          */
1191         if (uinfo->spu_subctxt_cnt <= 0)
1192                 goto bail;
1193         num_subctxts = uinfo->spu_subctxt_cnt;
1194
1195         /* Check for subctxt compatibility */
1196         if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1197                 uinfo->spu_userversion & 0xffff)) {
1198                 qib_devinfo(dd->pcidev,
1199                          "Mismatched user version (%d.%d) and driver "
1200                          "version (%d.%d) while context sharing. Ensure "
1201                          "that driver and library are from the same "
1202                          "release.\n",
1203                          (int) (uinfo->spu_userversion >> 16),
1204                          (int) (uinfo->spu_userversion & 0xffff),
1205                          QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1206                 goto bail;
1207         }
1208         if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1209                 ret = -EINVAL;
1210                 goto bail;
1211         }
1212
1213         rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1214         if (!rcd->subctxt_uregbase) {
1215                 ret = -ENOMEM;
1216                 goto bail;
1217         }
1218         /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1219         size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1220                      sizeof(u32), PAGE_SIZE) * num_subctxts;
1221         rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1222         if (!rcd->subctxt_rcvhdr_base) {
1223                 ret = -ENOMEM;
1224                 goto bail_ureg;
1225         }
1226
1227         rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1228                                               rcd->rcvegrbuf_size *
1229                                               num_subctxts);
1230         if (!rcd->subctxt_rcvegrbuf) {
1231                 ret = -ENOMEM;
1232                 goto bail_rhdr;
1233         }
1234
1235         rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1236         rcd->subctxt_id = uinfo->spu_subctxt_id;
1237         rcd->active_slaves = 1;
1238         rcd->redirect_seq_cnt = 1;
1239         set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1240         goto bail;
1241
1242 bail_rhdr:
1243         vfree(rcd->subctxt_rcvhdr_base);
1244 bail_ureg:
1245         vfree(rcd->subctxt_uregbase);
1246         rcd->subctxt_uregbase = NULL;
1247 bail:
1248         return ret;
1249 }
1250
1251 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1252                       struct file *fp, const struct qib_user_info *uinfo)
1253 {
1254         struct qib_devdata *dd = ppd->dd;
1255         struct qib_ctxtdata *rcd;
1256         void *ptmp = NULL;
1257         int ret;
1258
1259         rcd = qib_create_ctxtdata(ppd, ctxt);
1260
1261         /*
1262          * Allocate memory for use in qib_tid_update() at open to
1263          * reduce cost of expected send setup per message segment
1264          */
1265         if (rcd)
1266                 ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1267                                dd->rcvtidcnt * sizeof(struct page **),
1268                                GFP_KERNEL);
1269
1270         if (!rcd || !ptmp) {
1271                 qib_dev_err(dd, "Unable to allocate ctxtdata "
1272                             "memory, failing open\n");
1273                 ret = -ENOMEM;
1274                 goto bailerr;
1275         }
1276         rcd->userversion = uinfo->spu_userversion;
1277         ret = init_subctxts(dd, rcd, uinfo);
1278         if (ret)
1279                 goto bailerr;
1280         rcd->tid_pg_list = ptmp;
1281         rcd->pid = current->pid;
1282         init_waitqueue_head(&dd->rcd[ctxt]->wait);
1283         strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1284         ctxt_fp(fp) = rcd;
1285         qib_stats.sps_ctxts++;
1286         ret = 0;
1287         goto bail;
1288
1289 bailerr:
1290         dd->rcd[ctxt] = NULL;
1291         kfree(rcd);
1292         kfree(ptmp);
1293 bail:
1294         return ret;
1295 }
1296
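/*
 * A port is usable if its unit is present with registers mapped, the
 * port has a LID, and the link is Active (or, if active_only is zero,
 * at least Init, Armed or Active).
 */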
1297 static inline int usable(struct qib_pportdata *ppd, int active_only)
1298 {
1299         struct qib_devdata *dd = ppd->dd;
1300         u32 linkok = active_only ? QIBL_LINKACTIVE :
1301                  (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
1302
1303         return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1304                 (ppd->lflags & linkok);
1305 }
1306
1307 static int find_free_ctxt(int unit, struct file *fp,
1308                           const struct qib_user_info *uinfo)
1309 {
1310         struct qib_devdata *dd = qib_lookup(unit);
1311         struct qib_pportdata *ppd = NULL;
1312         int ret;
1313         u32 ctxt;
1314
1315         if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
1316                 ret = -ENODEV;
1317                 goto bail;
1318         }
1319
1320         /*
1321          * If the user requests a specific port, only try that one port, else
1322          * select "best" port below, based on context.
1323          */
1324         if (uinfo->spu_port) {
1325                 ppd = dd->pport + uinfo->spu_port - 1;
1326                 if (!usable(ppd, 0)) {
1327                         ret = -ENETDOWN;
1328                         goto bail;
1329                 }
1330         }
1331
1332         for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
1333                 if (dd->rcd[ctxt])
1334                         continue;
1335                 /*
1336                  * The setting and clearing of user context rcd[x] is
1337                  * protected by the qib_mutex.
1338                  */
1339                 if (!ppd) {
1340                         /* choose port based on ctxt, if up, else 1st up */
1341                         ppd = dd->pport + (ctxt % dd->num_pports);
1342                         if (!usable(ppd, 0)) {
1343                                 int i;
1344                                 for (i = 0; i < dd->num_pports; i++) {
1345                                         ppd = dd->pport + i;
1346                                         if (usable(ppd, 0))
1347                                                 break;
1348                                 }
1349                                 if (i == dd->num_pports) {
1350                                         ret = -ENETDOWN;
1351                                         goto bail;
1352                                 }
1353                         }
1354                 }
1355                 ret = setup_ctxt(ppd, ctxt, fp, uinfo);
1356                 goto bail;
1357         }
1358         ret = -EBUSY;
1359
1360 bail:
1361         return ret;
1362 }
1363
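/*
 * Scan all present units for a free user context, using the requested
 * port if one was given, otherwise picking a usable port by context
 * number (or the first port that is up).
 */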
1364 static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1365 {
1366         struct qib_pportdata *ppd;
1367         int ret = 0, devmax;
1368         int npresent, nup;
1369         int ndev;
1370         u32 port = uinfo->spu_port, ctxt;
1371
1372         devmax = qib_count_units(&npresent, &nup);
1373
1374         for (ndev = 0; ndev < devmax; ndev++) {
1375                 struct qib_devdata *dd = qib_lookup(ndev);
1376
1377                 /* device portion of usable() */
1378                 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1379                         continue;
1380                 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
1381                         if (dd->rcd[ctxt])
1382                                 continue;
1383                         if (port) {
1384                                 if (port > dd->num_pports)
1385                                         continue;
1386                                 ppd = dd->pport + port - 1;
1387                                 if (!usable(ppd, 0))
1388                                         continue;
1389                         } else {
1390                                 /*
1391                                  * choose port based on ctxt, if up, else
1392                                  * first port that's up for multi-port HCA
1393                                  */
1394                                 ppd = dd->pport + (ctxt % dd->num_pports);
1395                                 if (!usable(ppd, 0)) {
1396                                         int j;
1397
1398                                         ppd = NULL;
1399                                         for (j = 0; j < dd->num_pports &&
1400                                                 !ppd; j++)
1401                                                 if (usable(dd->pport + j, 0))
1402                                                         ppd = dd->pport + j;
1403                                         if (!ppd)
1404                                                 continue; /* to next unit */
1405                                 }
1406                         }
1407                         ret = setup_ctxt(ppd, ctxt, fp, uinfo);
1408                         goto done;
1409                 }
1410         }
1411
1412         if (npresent) {
1413                 if (nup == 0)
1414                         ret = -ENETDOWN;
1415                 else
1416                         ret = -EBUSY;
1417         } else
1418                 ret = -ENXIO;
1419
1420 done:
1421         return ret;
1422 }
1423
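/*
 * find_shared_ctxt - attach to an existing master context as a subcontext.
 * A process that presents the same spu_subctxt_id as an already-open
 * master joins that context, provided the subcontext count and user
 * version match and a slot is still free.  Returns 1 when attached
 * (the caller treats the assignment as complete), 0 if no matching
 * context exists, or -EINVAL on a mismatch.
 */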
1424 static int find_shared_ctxt(struct file *fp,
1425                             const struct qib_user_info *uinfo)
1426 {
1427         int devmax, ndev, i;
1428         int ret = 0;
1429
1430         devmax = qib_count_units(NULL, NULL);
1431
1432         for (ndev = 0; ndev < devmax; ndev++) {
1433                 struct qib_devdata *dd = qib_lookup(ndev);
1434
1435                 /* device portion of usable() */
1436                 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1437                         continue;
1438                 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1439                         struct qib_ctxtdata *rcd = dd->rcd[i];
1440
1441                         /* Skip ctxts which are not yet open */
1442                         if (!rcd || !rcd->cnt)
1443                                 continue;
1444                         /* Skip ctxt if it doesn't match the requested one */
1445                         if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1446                                 continue;
1447                         /* Verify the sharing process matches the master */
1448                         if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1449                             rcd->userversion != uinfo->spu_userversion ||
1450                             rcd->cnt >= rcd->subctxt_cnt) {
1451                                 ret = -EINVAL;
1452                                 goto done;
1453                         }
1454                         ctxt_fp(fp) = rcd;
1455                         subctxt_fp(fp) = rcd->cnt++;
1456                         rcd->subpid[subctxt_fp(fp)] = current->pid;
1457                         tidcursor_fp(fp) = 0;
1458                         rcd->active_slaves |= 1 << subctxt_fp(fp);
1459                         ret = 1;
1460                         goto done;
1461                 }
1462         }
1463
1464 done:
1465         return ret;
1466 }
1467
1468 static int qib_open(struct inode *in, struct file *fp)
1469 {
1470         /* The real work is performed later in qib_assign_ctxt() */
1471         fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1472         if (fp->private_data) /* no cpu affinity by default */
1473                 ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1474         return fp->private_data ? 0 : -ENOMEM;
1475 }
1476
1477 /*
1478  * Get the ctxt early, so we can set cpu affinity prior to memory allocation.
1479  */
1480 static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1481 {
1482         int ret;
1483         int i_minor;
1484         unsigned swmajor, swminor;
1485
1486         /* Check to be sure we haven't already initialized this file */
1487         if (ctxt_fp(fp)) {
1488                 ret = -EINVAL;
1489                 goto done;
1490         }
1491
1492         /* for now, if major version is different, bail */
1493         swmajor = uinfo->spu_userversion >> 16;
1494         if (swmajor != QIB_USER_SWMAJOR) {
1495                 ret = -ENODEV;
1496                 goto done;
1497         }
1498
1499         swminor = uinfo->spu_userversion & 0xffff;
1500
1501         mutex_lock(&qib_mutex);
1502
1503         if (qib_compatible_subctxts(swmajor, swminor) &&
1504             uinfo->spu_subctxt_cnt) {
1505                 ret = find_shared_ctxt(fp, uinfo);
1506                 if (ret) {
1507                         if (ret > 0)
1508                                 ret = 0;
1509                         goto done_chk_sdma;
1510                 }
1511         }
1512
1513         i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
1514         if (i_minor)
1515                 ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1516         else
1517                 ret = get_a_ctxt(fp, uinfo);
1518
1519 done_chk_sdma:
1520         if (!ret) {
1521                 struct qib_filedata *fd = fp->private_data;
1522                 const struct qib_ctxtdata *rcd = fd->rcd;
1523                 const struct qib_devdata *dd = rcd->dd;
1524
1525                 if (dd->flags & QIB_HAS_SEND_DMA) {
1526                         fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1527                                                             dd->unit,
1528                                                             rcd->ctxt,
1529                                                             fd->subctxt);
1530                         if (!fd->pq)
1531                                 ret = -ENOMEM;
1532                 }
1533
1534                 /*
1535                  * If the process has NOT already set its affinity, select and
1536                  * reserve a processor for it, as a rendezvous for all
1537                  * users of the driver.  If the process never actually sets
1538                  * affinity to this cpu, or later sets it to some other cpu,
1539                  * that just means that sooner or later we stop recommending
1540                  * a cpu, and let the scheduler do its best.
1541                  */
1542                 if (!ret && cpus_weight(current->cpus_allowed) >=
1543                     qib_cpulist_count) {
1544                         int cpu;
1545                         cpu = find_first_zero_bit(qib_cpulist,
1546                                                   qib_cpulist_count);
1547                         if (cpu != qib_cpulist_count) {
1548                                 __set_bit(cpu, qib_cpulist);
1549                                 fd->rec_cpu_num = cpu;
1550                         }
1551                 } else if (cpus_weight(current->cpus_allowed) == 1 &&
1552                         test_bit(first_cpu(current->cpus_allowed),
1553                                  qib_cpulist))
1554                         qib_devinfo(dd->pcidev, "%s PID %u affinity "
1555                                     "set to cpu %d; already allocated\n",
1556                                     current->comm, current->pid,
1557                                     first_cpu(current->cpus_allowed));
1558         }
1559
1560         mutex_unlock(&qib_mutex);
1561
1562 done:
1563         return ret;
1564 }
1565
1566
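/*
 * qib_do_user_init - per-context initialization for QIB_CMD_USER_INIT.
 * For the master (subctxt 0) this carves out the context's share of the
 * 2KB PIO send buffers, allocates the receive header queue and eager
 * buffers, and enables receive on the context.  Subcontexts only wait
 * here for the master to finish its initialization.
 */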
1567 static int qib_do_user_init(struct file *fp,
1568                             const struct qib_user_info *uinfo)
1569 {
1570         int ret;
1571         struct qib_ctxtdata *rcd = ctxt_fp(fp);
1572         struct qib_devdata *dd;
1573         unsigned uctxt;
1574
1575         /* Subctxts don't need to initialize anything since master did it. */
1576         if (subctxt_fp(fp)) {
1577                 ret = wait_event_interruptible(rcd->wait,
1578                         !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1579                 goto bail;
1580         }
1581
1582         dd = rcd->dd;
1583
1584         /* some ctxts may get extra buffers, calculate that here */
1585         uctxt = rcd->ctxt - dd->first_user_ctxt;
1586         if (uctxt < dd->ctxts_extrabuf) {
1587                 rcd->piocnt = dd->pbufsctxt + 1;
1588                 rcd->pio_base = rcd->piocnt * uctxt;
1589         } else {
1590                 rcd->piocnt = dd->pbufsctxt;
1591                 rcd->pio_base = rcd->piocnt * uctxt +
1592                         dd->ctxts_extrabuf;
1593         }
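        /*
         * Illustrative example with made-up numbers: if pbufsctxt were 10
         * and ctxts_extrabuf were 3, user contexts 0..2 would each get 11
         * buffers at bases 0, 11 and 22, while contexts 3, 4, ... would
         * get 10 buffers at bases 33, 43, ..., so the per-context ranges
         * tile the buffer pool without gaps or overlap.
         */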
1594
1595         /*
1596          * All user buffers are 2KB buffers.  If we ever support
1597          * giving 4KB buffers to user processes, this will need some
1598          * work.  Can't use piobufbase directly, because it has
1599          * both 2K and 4K buffer base values.  So check and handle.
1600          */
1601         if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1602                 if (rcd->pio_base >= dd->piobcnt2k) {
1603                         qib_dev_err(dd,
1604                                     "%u:ctxt%u: no 2KB buffers available\n",
1605                                     dd->unit, rcd->ctxt);
1606                         ret = -ENOBUFS;
1607                         goto bail;
1608                 }
1609                 rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1610                 qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1611                             rcd->ctxt, rcd->piocnt);
1612         }
1613
1614         rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1615         qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1616                                TXCHK_CHG_TYPE_USER, rcd);
1617         /*
1618          * Try to ensure that processes start up with a consistent pioavail
1619          * update for their own buffer range, at least.  If the system is
1620          * very quiet, the in-memory copy may be out of date at startup for
1621          * this range of buffers when a context gets re-used.  Do this after
1622          * the chg_pioavail and before the rest of setup, so it is "almost
1623          * certain" the DMA will have occurred (we can't guarantee it 100%,
1624          * but it should be many nines of probability with this ordering),
1625          * given how much else happens after this.
1626          */
1627         dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1628
1629         /*
1630          * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1631          * array for the time being.  If rcd->ctxt ever exceeds what the
1632          * chip supports, we will need extra handling here, overflowing
1633          * through ctxt 0, someday.
1634          */
1635         ret = qib_create_rcvhdrq(dd, rcd);
1636         if (!ret)
1637                 ret = qib_setup_eagerbufs(rcd);
1638         if (ret)
1639                 goto bail_pio;
1640
1641         rcd->tidcursor = 0; /* start at beginning after open */
1642
1643         /* initialize poll variables... */
1644         rcd->urgent = 0;
1645         rcd->urgent_poll = 0;
1646
1647         /*
1648          * Now enable the ctxt for receive.
1649          * Some chips DMA the tail register to memory when it changes
1650          * (and when the update bit transitions from 0 to 1), so for
1651          * those chips we turn the update off and then back on.
1652          * This will (very briefly) affect any other open ctxts, but the
1653          * duration is very short, and therefore isn't an issue.  We
1654          * explicitly set the in-memory tail copy to 0 beforehand, so we
1655          * don't have to wait to be sure the DMA update has happened
1656          * (chip resets head/tail to 0 on transition to enable).
1657          */
1658         if (rcd->rcvhdrtail_kvaddr)
1659                 qib_clear_rcvhdrtail(rcd);
1660
1661         dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1662                       rcd->ctxt);
1663
1664         /* Notify any waiting slaves */
1665         if (rcd->subctxt_cnt) {
1666                 clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1667                 wake_up(&rcd->wait);
1668         }
1669         return 0;
1670
1671 bail_pio:
1672         qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1673                                TXCHK_CHG_TYPE_KERN, rcd);
1674 bail:
1675         return ret;
1676 }
1677
1678 /**
1679  * unlock_expected_tids - unlock any expected TID entries the context still had in use
1680  * @rcd: ctxt
1681  *
1682  * We don't actually update the chip here; the caller does a bulk
1683  * update with f_clear_tids.
1684  */
1685 static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1686 {
1687         struct qib_devdata *dd = rcd->dd;
1688         int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1689         int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1690
1691         for (i = ctxt_tidbase; i < maxtid; i++) {
1692                 struct page *p = dd->pageshadow[i];
1693                 dma_addr_t phys;
1694
1695                 if (!p)
1696                         continue;
1697
1698                 phys = dd->physshadow[i];
1699                 dd->physshadow[i] = dd->tidinvalid;
1700                 dd->pageshadow[i] = NULL;
1701                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1702                                PCI_DMA_FROMDEVICE);
1703                 qib_release_user_pages(&p, 1);
1704                 cnt++;
1705         }
1706 }
1707
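/*
 * qib_close - release a user (sub)context when its file is closed.
 * Under qib_mutex: flush any in-progress PIO writes, drain and destroy
 * the per-fd user SDMA queue, release the recommended cpu, and, if this
 * was the last user of the context, disable receive for it, disarm its
 * PIO buffers and return them to the kernel pool, clear its TID entries,
 * unpin the expected-TID pages, and free the context data.
 */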
1708 static int qib_close(struct inode *in, struct file *fp)
1709 {
1710         int ret = 0;
1711         struct qib_filedata *fd;
1712         struct qib_ctxtdata *rcd;
1713         struct qib_devdata *dd;
1714         unsigned long flags;
1715         unsigned ctxt;
1716         pid_t pid;
1717
1718         mutex_lock(&qib_mutex);
1719
1720         fd = (struct qib_filedata *) fp->private_data;
1721         fp->private_data = NULL;
1722         rcd = fd->rcd;
1723         if (!rcd) {
1724                 mutex_unlock(&qib_mutex);
1725                 goto bail;
1726         }
1727
1728         dd = rcd->dd;
1729
1730         /* ensure all pio buffer writes in progress are flushed */
1731         qib_flush_wc();
1732
1733         /* drain user sdma queue */
1734         if (fd->pq) {
1735                 qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1736                 qib_user_sdma_queue_destroy(fd->pq);
1737         }
1738
1739         if (fd->rec_cpu_num != -1)
1740                 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1741
1742         if (--rcd->cnt) {
1743                 /*
1744                  * XXX If the master closes the context before the slave(s),
1745                  * revoke the mmap for the eager receive queue so
1746                  * the slave(s) don't wait for receive data forever.
1747                  */
1748                 rcd->active_slaves &= ~(1 << fd->subctxt);
1749                 rcd->subpid[fd->subctxt] = 0;
1750                 mutex_unlock(&qib_mutex);
1751                 goto bail;
1752         }
1753
1754         /* early; no interrupt users after this */
1755         spin_lock_irqsave(&dd->uctxt_lock, flags);
1756         ctxt = rcd->ctxt;
1757         dd->rcd[ctxt] = NULL;
1758         pid = rcd->pid;
1759         rcd->pid = 0;
1760         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1761
1762         if (rcd->rcvwait_to || rcd->piowait_to ||
1763             rcd->rcvnowait || rcd->pionowait) {
1764                 rcd->rcvwait_to = 0;
1765                 rcd->piowait_to = 0;
1766                 rcd->rcvnowait = 0;
1767                 rcd->pionowait = 0;
1768         }
1769         if (rcd->flag)
1770                 rcd->flag = 0;
1771
1772         if (dd->kregbase) {
1773                 /* atomically clear receive enable ctxt and intr avail. */
1774                 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1775                                   QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1776
1777                 /* clean up the pkeys for this ctxt user */
1778                 qib_clean_part_key(rcd, dd);
1779                 qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1780                 qib_chg_pioavailkernel(dd, rcd->pio_base,
1781                                        rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1782
1783                 dd->f_clear_tids(dd, rcd);
1784
1785                 if (dd->pageshadow)
1786                         unlock_expected_tids(rcd);
1787                 qib_stats.sps_ctxts--;
1788         }
1789
1790         mutex_unlock(&qib_mutex);
1791         qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1792
1793 bail:
1794         kfree(fd);
1795         return ret;
1796 }
1797
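/*
 * qib_ctxt_info - service QIB_CMD_CTXT_INFO: copy a snapshot of the
 * assigned unit/port/context/subcontext numbers, the number of user
 * contexts on the device, and the recommended cpu out to the address
 * supplied by the user.
 */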
1798 static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1799 {
1800         struct qib_ctxt_info info;
1801         int ret;
1802         size_t sz;
1803         struct qib_ctxtdata *rcd = ctxt_fp(fp);
1804         struct qib_filedata *fd;
1805
1806         fd = (struct qib_filedata *) fp->private_data;
1807
1808         info.num_active = qib_count_active_units();
1809         info.unit = rcd->dd->unit;
1810         info.port = rcd->ppd->port;
1811         info.ctxt = rcd->ctxt;
1812         info.subctxt =  subctxt_fp(fp);
1813         info.subctxt = subctxt_fp(fp);
1814         info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1815         info.num_subctxts = rcd->subctxt_cnt;
1816         info.rec_cpu = fd->rec_cpu_num;
1817         sz = sizeof(info);
1818
1819         if (copy_to_user(uinfo, &info, sz)) {
1820                 ret = -EFAULT;
1821                 goto bail;
1822         }
1823         ret = 0;
1824
1825 bail:
1826         return ret;
1827 }
1828
1829 static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1830                                  u32 __user *inflightp)
1831 {
1832         const u32 val = qib_user_sdma_inflight_counter(pq);
1833
1834         if (put_user(val, inflightp))
1835                 return -EFAULT;
1836
1837         return 0;
1838 }
1839
1840 static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1841                                  struct qib_user_sdma_queue *pq,
1842                                  u32 __user *completep)
1843 {
1844         u32 val;
1845         int err;
1846
1847         if (!pq)
1848                 return -EINVAL;
1849
1850         err = qib_user_sdma_make_progress(ppd, pq);
1851         if (err < 0)
1852                 return err;
1853
1854         val = qib_user_sdma_complete_counter(pq);
1855         if (put_user(val, completep))
1856                 return -EFAULT;
1857
1858         return 0;
1859 }
1860
1861 static int disarm_req_delay(struct qib_ctxtdata *rcd)
1862 {
1863         int ret = 0;
1864
1865         if (!usable(rcd->ppd, 1)) {
1866                 int i;
1867                 /*
1868                  * If the link is down, or otherwise not usable, delay
1869                  * the caller up to 30 seconds, so we don't thrash
1870                  * in trying to get the chip back to ACTIVE, and set
1871                  * a flag so the caller knows to make the call again.
1872                  */
1873                 if (rcd->user_event_mask) {
1874                         /*
1875                          * subctxt_cnt is 0 if not shared, so set the base
1876                          * entry separately first, then any remaining subctxts
1877                          */
1878                         set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1879                                 &rcd->user_event_mask[0]);
1880                         for (i = 1; i < rcd->subctxt_cnt; i++)
1881                                 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1882                                         &rcd->user_event_mask[i]);
1883                 }
1884                 for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
1885                         msleep(100);
1886                 ret = -ENETDOWN;
1887         }
1888         return ret;
1889 }
1890
1891 /*
1892  * Find all user contexts in use, and set the specified bit in their
1893  * event mask.
1894  * See also find_ctxt() for a similar use, that is specific to send buffers.
1895  * See also find_ctxt() for a similar use that is specific to send buffers.
1896 int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1897 {
1898         struct qib_ctxtdata *rcd;
1899         unsigned ctxt;
1900         int ret = 0;
1901
1902         spin_lock(&ppd->dd->uctxt_lock);
1903         for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
1904              ctxt++) {
1905                 rcd = ppd->dd->rcd[ctxt];
1906                 if (!rcd)
1907                         continue;
1908                 if (rcd->user_event_mask) {
1909                         int i;
1910                         /*
1911                          * subctxt_cnt is 0 if not shared, so set the base
1912                          * entry separately first, then any remaining subctxts
1913                          */
1914                         set_bit(evtbit, &rcd->user_event_mask[0]);
1915                         for (i = 1; i < rcd->subctxt_cnt; i++)
1916                                 set_bit(evtbit, &rcd->user_event_mask[i]);
1917                 }
1918                 ret = 1;
1919                 break;
1920         }
1921         spin_unlock(&ppd->dd->uctxt_lock);
1922
1923         return ret;
1924 }
1925
1926 /*
1927  * Clear the event-notifier bits for this context.
1928  * For the DISARM_BUFS case, we also take action (this obsoletes
1929  * the older QIB_CMD_DISARM_BUFS, but we keep that command for
1930  * backwards compatibility).
1931  * Other bits don't currently require action; we just clear them atomically.
1932  * The user process then performs whatever action is appropriate to the
1933  * bit having been set, if desired, and checks again in the future.
1934  */
1935 static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
1936                               unsigned long events)
1937 {
1938         int ret = 0, i;
1939
1940         for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
1941                 if (!test_bit(i, &events))
1942                         continue;
1943                 if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
1944                         (void)qib_disarm_piobufs_ifneeded(rcd);
1945                         ret = disarm_req_delay(rcd);
1946                 } else
1947                         clear_bit(i, &rcd->user_event_mask[subctxt]);
1948         }
1949         return ret;
1950 }
1951
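/*
 * qib_write - the driver's command ABI.  Userspace writes a struct qib_cmd
 * whose type field selects which member of the cmd union follows; every
 * command except QIB_CMD_ASSIGN_CTXT requires that a context has already
 * been assigned to this fd.  On success the number of bytes consumed is
 * returned.
 *
 * A rough, illustrative sketch of how a user library might assign and then
 * initialize a context (not authoritative; see qib_common.h for the real
 * layout of struct qib_cmd, struct qib_user_info and QIB_USER_SWVERSION,
 * and assume base_info is a local struct qib_base_info):
 *
 *	struct qib_cmd c = { .type = QIB_CMD_ASSIGN_CTXT };
 *	c.cmd.user_info.spu_userversion = QIB_USER_SWVERSION;
 *	c.cmd.user_info.spu_base_info_size = sizeof(base_info);
 *	c.cmd.user_info.spu_base_info = (__u64) (unsigned long) &base_info;
 *	write(fd, &c, sizeof(c.type) + sizeof(c.cmd.user_info));
 *	c.type = QIB_CMD_USER_INIT;
 *	write(fd, &c, sizeof(c.type) + sizeof(c.cmd.user_info));
 */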
1952 static ssize_t qib_write(struct file *fp, const char __user *data,
1953                          size_t count, loff_t *off)
1954 {
1955         const struct qib_cmd __user *ucmd;
1956         struct qib_ctxtdata *rcd;
1957         const void __user *src;
1958         size_t consumed, copy = 0;
1959         struct qib_cmd cmd;
1960         ssize_t ret = 0;
1961         void *dest;
1962
1963         if (count < sizeof(cmd.type)) {
1964                 ret = -EINVAL;
1965                 goto bail;
1966         }
1967
1968         ucmd = (const struct qib_cmd __user *) data;
1969
1970         if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
1971                 ret = -EFAULT;
1972                 goto bail;
1973         }
1974
1975         consumed = sizeof(cmd.type);
1976
1977         switch (cmd.type) {
1978         case QIB_CMD_ASSIGN_CTXT:
1979         case QIB_CMD_USER_INIT:
1980                 copy = sizeof(cmd.cmd.user_info);
1981                 dest = &cmd.cmd.user_info;
1982                 src = &ucmd->cmd.user_info;
1983                 break;
1984
1985         case QIB_CMD_RECV_CTRL:
1986                 copy = sizeof(cmd.cmd.recv_ctrl);
1987                 dest = &cmd.cmd.recv_ctrl;
1988                 src = &ucmd->cmd.recv_ctrl;
1989                 break;
1990
1991         case QIB_CMD_CTXT_INFO:
1992                 copy = sizeof(cmd.cmd.ctxt_info);
1993                 dest = &cmd.cmd.ctxt_info;
1994                 src = &ucmd->cmd.ctxt_info;
1995                 break;
1996
1997         case QIB_CMD_TID_UPDATE:
1998         case QIB_CMD_TID_FREE:
1999                 copy = sizeof(cmd.cmd.tid_info);
2000                 dest = &cmd.cmd.tid_info;
2001                 src = &ucmd->cmd.tid_info;
2002                 break;
2003
2004         case QIB_CMD_SET_PART_KEY:
2005                 copy = sizeof(cmd.cmd.part_key);
2006                 dest = &cmd.cmd.part_key;
2007                 src = &ucmd->cmd.part_key;
2008                 break;
2009
2010         case QIB_CMD_DISARM_BUFS:
2011         case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2012                 copy = 0;
2013                 src = NULL;
2014                 dest = NULL;
2015                 break;
2016
2017         case QIB_CMD_POLL_TYPE:
2018                 copy = sizeof(cmd.cmd.poll_type);
2019                 dest = &cmd.cmd.poll_type;
2020                 src = &ucmd->cmd.poll_type;
2021                 break;
2022
2023         case QIB_CMD_ARMLAUNCH_CTRL:
2024                 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2025                 dest = &cmd.cmd.armlaunch_ctrl;
2026                 src = &ucmd->cmd.armlaunch_ctrl;
2027                 break;
2028
2029         case QIB_CMD_SDMA_INFLIGHT:
2030                 copy = sizeof(cmd.cmd.sdma_inflight);
2031                 dest = &cmd.cmd.sdma_inflight;
2032                 src = &ucmd->cmd.sdma_inflight;
2033                 break;
2034
2035         case QIB_CMD_SDMA_COMPLETE:
2036                 copy = sizeof(cmd.cmd.sdma_complete);
2037                 dest = &cmd.cmd.sdma_complete;
2038                 src = &ucmd->cmd.sdma_complete;
2039                 break;
2040
2041         case QIB_CMD_ACK_EVENT:
2042                 copy = sizeof(cmd.cmd.event_mask);
2043                 dest = &cmd.cmd.event_mask;
2044                 src = &ucmd->cmd.event_mask;
2045                 break;
2046
2047         default:
2048                 ret = -EINVAL;
2049                 goto bail;
2050         }
2051
2052         if (copy) {
2053                 if ((count - consumed) < copy) {
2054                         ret = -EINVAL;
2055                         goto bail;
2056                 }
2057                 if (copy_from_user(dest, src, copy)) {
2058                         ret = -EFAULT;
2059                         goto bail;
2060                 }
2061                 consumed += copy;
2062         }
2063
2064         rcd = ctxt_fp(fp);
2065         if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2066                 ret = -EINVAL;
2067                 goto bail;
2068         }
2069
2070         switch (cmd.type) {
2071         case QIB_CMD_ASSIGN_CTXT:
2072                 ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2073                 if (ret)
2074                         goto bail;
2075                 break;
2076
2077         case QIB_CMD_USER_INIT:
2078                 ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2079                 if (ret)
2080                         goto bail;
2081                 ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2082                                         cmd.cmd.user_info.spu_base_info,
2083                                         cmd.cmd.user_info.spu_base_info_size);
2084                 break;
2085
2086         case QIB_CMD_RECV_CTRL:
2087                 ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2088                 break;
2089
2090         case QIB_CMD_CTXT_INFO:
2091                 ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2092                                     (unsigned long) cmd.cmd.ctxt_info);
2093                 break;
2094
2095         case QIB_CMD_TID_UPDATE:
2096                 ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2097                 break;
2098
2099         case QIB_CMD_TID_FREE:
2100                 ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2101                 break;
2102
2103         case QIB_CMD_SET_PART_KEY:
2104                 ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2105                 break;
2106
2107         case QIB_CMD_DISARM_BUFS:
2108                 (void)qib_disarm_piobufs_ifneeded(rcd);
2109                 ret = disarm_req_delay(rcd);
2110                 break;
2111
2112         case QIB_CMD_PIOAVAILUPD:
2113                 qib_force_pio_avail_update(rcd->dd);
2114                 break;
2115
2116         case QIB_CMD_POLL_TYPE:
2117                 rcd->poll_type = cmd.cmd.poll_type;
2118                 break;
2119
2120         case QIB_CMD_ARMLAUNCH_CTRL:
2121                 rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2122                 break;
2123
2124         case QIB_CMD_SDMA_INFLIGHT:
2125                 ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2126                                             (u32 __user *) (unsigned long)
2127                                             cmd.cmd.sdma_inflight);
2128                 break;
2129
2130         case QIB_CMD_SDMA_COMPLETE:
2131                 ret = qib_sdma_get_complete(rcd->ppd,
2132                                             user_sdma_queue_fp(fp),
2133                                             (u32 __user *) (unsigned long)
2134                                             cmd.cmd.sdma_complete);
2135                 break;
2136
2137         case QIB_CMD_ACK_EVENT:
2138                 ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2139                                          cmd.cmd.event_mask);
2140                 break;
2141         }
2142
2143         if (ret >= 0)
2144                 ret = consumed;
2145
2146 bail:
2147         return ret;
2148 }
2149
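/*
 * qib_aio_write - submit user SDMA work.  A vectored write on the device
 * fd hands the iovec array to the per-fd user SDMA queue, which builds
 * and posts send descriptors for packets the user has constructed in its
 * own memory.
 */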
2150 static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
2151                              unsigned long dim, loff_t off)
2152 {
2153         struct qib_filedata *fp = iocb->ki_filp->private_data;
2154         struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2155         struct qib_user_sdma_queue *pq = fp->pq;
2156
2157         if (!dim || !pq)
2158                 return -EINVAL;
2159
2160         return qib_user_sdma_writev(rcd, pq, iov, dim);
2161 }
2162
2163 static struct class *qib_class;
2164 static dev_t qib_dev;
2165
2166 int qib_cdev_init(int minor, const char *name,
2167                   const struct file_operations *fops,
2168                   struct cdev **cdevp, struct device **devp)
2169 {
2170         const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2171         struct cdev *cdev;
2172         struct device *device = NULL;
2173         int ret;
2174
2175         cdev = cdev_alloc();
2176         if (!cdev) {
2177                 printk(KERN_ERR QIB_DRV_NAME
2178                        ": Could not allocate cdev for minor %d, %s\n",
2179                        minor, name);
2180                 ret = -ENOMEM;
2181                 goto done;
2182         }
2183
2184         cdev->owner = THIS_MODULE;
2185         cdev->ops = fops;
2186         kobject_set_name(&cdev->kobj, name);
2187
2188         ret = cdev_add(cdev, dev, 1);
2189         if (ret < 0) {
2190                 printk(KERN_ERR QIB_DRV_NAME
2191                        ": Could not add cdev for minor %d, %s (err %d)\n",
2192                        minor, name, -ret);
2193                 goto err_cdev;
2194         }
2195
2196         device = device_create(qib_class, NULL, dev, NULL, name);
2197         if (!IS_ERR(device))
2198                 goto done;
2199         ret = PTR_ERR(device);
2200         device = NULL;
2201         printk(KERN_ERR QIB_DRV_NAME ": Could not create "
2202                "device for minor %d, %s (err %d)\n",
2203                minor, name, -ret);
2204 err_cdev:
2205         cdev_del(cdev);
2206         cdev = NULL;
2207 done:
2208         *cdevp = cdev;
2209         *devp = device;
2210         return ret;
2211 }
2212
2213 void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2214 {
2215         struct device *device = *devp;
2216
2217         if (device) {
2218                 device_unregister(device);
2219                 *devp = NULL;
2220         }
2221
2222         if (*cdevp) {
2223                 cdev_del(*cdevp);
2224                 *cdevp = NULL;
2225         }
2226 }
2227
2228 static struct cdev *wildcard_cdev;
2229 static struct device *wildcard_device;
2230
2231 int __init qib_dev_init(void)
2232 {
2233         int ret;
2234
2235         ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2236         if (ret < 0) {
2237                 printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
2238                        "chrdev region (err %d)\n", -ret);
2239                 goto done;
2240         }
2241
2242         qib_class = class_create(THIS_MODULE, "ipath");
2243         if (IS_ERR(qib_class)) {
2244                 ret = PTR_ERR(qib_class);
2245                 printk(KERN_ERR QIB_DRV_NAME ": Could not create "
2246                        "device class (err %d)\n", -ret);
2247                 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2248         }
2249
2250 done:
2251         return ret;
2252 }
2253
2254 void qib_dev_cleanup(void)
2255 {
2256         if (qib_class) {
2257                 class_destroy(qib_class);
2258                 qib_class = NULL;
2259         }
2260
2261         unregister_chrdev_region(qib_dev, QIB_NMINORS);
2262 }
2263
2264 static atomic_t user_count = ATOMIC_INIT(0);
2265
2266 static void qib_user_remove(struct qib_devdata *dd)
2267 {
2268         if (atomic_dec_return(&user_count) == 0)
2269                 qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2270
2271         qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2272 }
2273
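/*
 * Device node layout (relative to QIB_USER_MINOR_BASE): minor 0 is the
 * wildcard "ipath" node shared by all units, created by the first
 * qib_user_add() call and refcounted via user_count; each unit also gets
 * its own "ipathN" node at minor N + 1, which qib_assign_ctxt() uses to
 * pin the open to that unit via find_free_ctxt().
 */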
2274 static int qib_user_add(struct qib_devdata *dd)
2275 {
2276         char name[10];
2277         int ret;
2278
2279         if (atomic_inc_return(&user_count) == 1) {
2280                 ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2281                                     &wildcard_cdev, &wildcard_device);
2282                 if (ret)
2283                         goto done;
2284         }
2285
2286         snprintf(name, sizeof(name), "ipath%d", dd->unit);
2287         ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2288                             &dd->user_cdev, &dd->user_device);
2289         if (ret)
2290                 qib_user_remove(dd);
2291 done:
2292         return ret;
2293 }
2294
2295 /*
2296  * Create per-unit files in /dev
2297  */
2298 int qib_device_create(struct qib_devdata *dd)
2299 {
2300         int r, ret;
2301
2302         r = qib_user_add(dd);
2303         ret = qib_diag_add(dd);
2304         if (r && !ret)
2305                 ret = r;
2306         return ret;
2307 }
2308
2309 /*
2310  * Remove per-unit files in /dev
2311  * Returns void; the core kernel reports no errors for these removals.
2312  */
2313 void qib_device_remove(struct qib_devdata *dd)
2314 {
2315         qib_user_remove(dd);
2316         qib_diag_remove(dd);
2317 }