/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5 secs (500 passes of a 10 ms sleep) */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500

struct qib_user_sdma_pkt {
        u8 naddr;               /* dimension of addr (1..4) ... */
        u32 counter;            /* sdma pkts queued counter for this entry */
        u64 added;              /* global descq number of entries */

        struct {
                u32 offset;                     /* offset for kvaddr, addr */
                u32 length;                     /* length in page */
                u8  put_page;                   /* should we put_page? */
                u8  dma_mapped;                 /* is page dma_mapped? */
                struct page *page;              /* may be NULL (coherent mem) */
                void *kvaddr;                   /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];   /* max pages, any more and we coalesce */
        struct list_head list;  /* list element */
};

struct qib_user_sdma_queue {
        /*
         * pkts sent to the dma engine are queued on this
         * list head.  the elements of this list are of type
         * struct qib_user_sdma_pkt...
         */
        struct list_head sent;

        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;

        /* packets are allocated from the slab cache... */
        char pkt_slab_name[64];
        struct kmem_cache *pkt_slab;

        /* packets are counted as they are queued... */
        u32 counter;
        u32 sent_counter;

        /* dma page table */
        struct rb_root dma_pages_root;

        /* protect everything above... */
        struct mutex lock;
};

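/*
 * Allocate and initialize a per-context user SDMA queue: the "sent"
 * packet list, a slab cache for packet structures, a dma_pool for the
 * expected-length headers, and the dma page rb-tree root.
 */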
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
        struct qib_user_sdma_queue *pq =
                kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

        if (!pq)
                goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        INIT_LIST_HEAD(&pq->sent);

        mutex_init(&pq->lock);

        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                         sizeof(struct qib_user_sdma_pkt),
                                         0, 0, NULL);

        if (!pq->pkt_slab)
                goto err_kfree;

        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->header_cache = dma_pool_create(pq->header_cache_name,
                                           dev,
                                           QIB_USER_SDMA_EXP_HEADER_LENGTH,
                                           4, 0);
        if (!pq->header_cache)
                goto err_slab;

        pq->dma_pages_root = RB_ROOT;

        goto done;

err_slab:
        kmem_cache_destroy(pq->pkt_slab);
err_kfree:
        kfree(pq);
        pq = NULL;

done:
        return pq;
}

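/* fill in fragment slot i of pkt->addr[] with its mapping details */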
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
                                    int i, size_t offset, size_t len,
                                    int put_page, int dma_mapped,
                                    struct page *page,
                                    void *kvaddr, dma_addr_t dma_addr)
{
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
}

static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
                                      u32 counter, size_t offset,
                                      size_t len, int dma_mapped,
                                      struct page *page,
                                      void *kvaddr, dma_addr_t dma_addr)
{
        pkt->naddr = 1;
        pkt->counter = counter;
        qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
                                kvaddr, dma_addr);
}

/* we have too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
{
        int ret = 0;
        struct page *page = alloc_page(GFP_KERNEL);
        void *mpage_save;
        char *mpage;
        int i;
        int len = 0;
        dma_addr_t dma_addr;

        if (!page) {
                ret = -ENOMEM;
                goto done;
        }

        mpage = kmap(page);
        mpage_save = mpage;
        for (i = 0; i < niov; i++) {
                int cfur;

                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_unmap;
                }

                mpage += iov[i].iov_len;
                len += iov[i].iov_len;
        }

        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }

        qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
                                dma_addr);
        pkt->naddr = 2;

        goto done;

free_unmap:
        kunmap(page);
        __free_page(page);
done:
        return ret;
}

/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long addr  = (unsigned long) iov->iov_base;
        const unsigned long  len  = iov->iov_len;
        const unsigned long spage = addr & PAGE_MASK;
        const unsigned long epage = (addr + len - 1) & PAGE_MASK;

        return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/*
 * Truncate length to page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
        const unsigned long offset = addr & ~PAGE_MASK;

        return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

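/*
 * Release a single packet fragment: dma-unmap and kunmap the page if
 * needed and then put or free it; header fragments without a page are
 * returned to the coherent header dma_pool instead.
 */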
static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
                                        int frag)
{
        const int i = frag;

        if (pkt->addr[i].page) {
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr)
                /* free coherent mem from cache... */
                dma_pool_free(pq->header_cache,
                              pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, int npages)
{
        struct page *pages[2];
        int j;
        int ret;

        ret = get_user_pages(current, current->mm, addr,
                             npages, 0, 1, pages, NULL);

        if (ret != npages) {
                int i;

                for (i = 0; i < ret; i++)
                        put_page(pages[i]);

                ret = -ENOMEM;
                goto done;
        }

        for (j = 0; j < npages; j++) {
                /* map the pages... */
                const int flen = qib_user_sdma_page_length(addr, tlen);
                dma_addr_t dma_addr =
                        dma_map_page(&dd->pcidev->dev,
                                     pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = addr & ~PAGE_MASK;

                if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        ret = -ENOMEM;
                        goto done;
                }

                qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
                                        pages[j], kmap(pages[j]), dma_addr);

                pkt->naddr++;
                addr += flen;
                tlen -= flen;
        }

done:
        return ret;
}

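/*
 * Pin and dma-map the user pages behind each iovec element of the
 * payload; on failure, free any fragments already added to the packet.
 */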
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                                 struct qib_user_sdma_queue *pq,
                                 struct qib_user_sdma_pkt *pkt,
                                 const struct iovec *iov,
                                 unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const int npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = qib_user_sdma_pin_pages(dd, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
        }

        goto done;

free_pkt:
        for (idx = 0; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
        return ret;
}

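/*
 * Attach the payload to the packet: if it would not fit in the
 * remaining pkt->addr[] slots, copy it into a single coalesced page,
 * otherwise pin the user pages in place.
 */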
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
                                      struct qib_user_sdma_queue *pq,
                                      struct qib_user_sdma_pkt *pkt,
                                      const struct iovec *iov,
                                      unsigned long niov, int npages)
{
        int ret = 0;

        if (npages >= ARRAY_SIZE(pkt->addr))
                ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
        else
                ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

        return ret;
}

/* free a packet list, releasing every fragment of every packet */
static void qib_user_sdma_free_pkt_list(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct list_head *list)
{
        struct qib_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
                int i;

                for (i = 0; i < pkt->naddr; i++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

                kmem_cache_free(pq->pkt_slab, pkt);
        }
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on the list, returning the number of
 * iovec entries consumed.  the list must be empty initially, since
 * we clean it out if there is an error...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                    struct qib_user_sdma_queue *pq,
                                    struct list_head *list,
                                    const struct iovec *iov,
                                    unsigned long niov,
                                    int maxpkts)
{
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
        struct page *page = NULL;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct qib_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
        int dma_mapped = 0;

        while (idx < niov && npkts < maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                int npages = 0;
                int cfur;

                dma_mapped = 0;
                len = iov[idx].iov_len;
                nw = len >> 2;
                page = NULL;

                pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
                if (!pkt) {
                        ret = -ENOMEM;
                        goto free_list;
                }

                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_pkt;
                }

                if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
                        pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
                                             &dma_addr);
                else
                        pbc = NULL;

                if (!pbc) {
                        page = alloc_page(GFP_KERNEL);
                        if (!page) {
                                ret = -ENOMEM;
                                goto free_pkt;
                        }
                        pbc = kmap(page);
                }

                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_pbc;
                }

                /*
                 * This assignment is a bit strange.  It's because
                 * the pbc counts the number of 32 bit words in the full
                 * packet _except_ the first word of the pbc itself...
                 */
                pktnwc = nw - 1;

                /*
                 * pktnw computation yields the number of 32 bit words
                 * that the caller has indicated in the PBC.  note that
                 * this is one less than the total number of words that
                 * go to the send DMA engine as the first 32 bit word
                 * of the PBC itself is not counted.  Armed with this count,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
                pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
                if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                idx++;
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen ||
                            slen > PAGE_SIZE) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }

                        npages++;
                        if ((faddr & PAGE_MASK) !=
                            ((faddr + slen - 1) & PAGE_MASK))
                                npages++;

                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }

                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
                        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }

                        dma_mapped = 1;
                }

                qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
                                          page, pbc, dma_addr);

                if (nfrags) {
                        ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                                         iov + idx_save + 1,
                                                         nfrags, npages);
                        if (ret < 0)
                                goto free_pbc_dma;
                }

                counter++;
                npkts++;

                list_add_tail(&pkt->list, list);
        }

        ret = idx;
        goto done;

free_pbc_dma:
        if (dma_mapped)
                dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
        if (page) {
                kunmap(page);
                __free_page(page);
        } else
                dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
        kmem_cache_free(pq->pkt_slab, pkt);
free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
        return ret;
}

static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
                                               u32 c)
{
        pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                     struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt;
        struct qib_user_sdma_pkt *pkt_prev;
        int ret = 0;

        INIT_LIST_HEAD(&free_list);

        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = ppd->sdma_descq_removed - pkt->added;

                if (descd < 0)
                        break;

                list_move_tail(&pkt->list, &free_list);

                /* one more packet cleaned */
                ret++;
        }

        if (!list_empty(&free_list)) {
                u32 counter;

                pkt = list_entry(free_list.prev,
                                 struct qib_user_sdma_pkt, list);
                counter = pkt->counter;

                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                qib_user_sdma_set_complete_counter(pq, counter);
        }

        return ret;
}

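/* tear down a queue created by qib_user_sdma_queue_create() */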
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
        if (!pq)
                return;

        kmem_cache_destroy(pq->pkt_slab);
        dma_pool_destroy(pq->header_cache);
        kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = qib_sdma_make_progress(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}

/* we're in close, drain packets so that we can clean up successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        int i;

        if (!pq)
                return;

        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                mutex_lock(&pq->lock);
                if (list_empty(&pq->sent)) {
                        mutex_unlock(&pq->lock);
                        break;
                }
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
                msleep(10);
        }

        if (!list_empty(&pq->sent)) {
                struct list_head free_list;

                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                mutex_lock(&pq->lock);
                list_splice_init(&pq->sent, &free_list);
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
}

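/*
 * Build the low descriptor qword from the buffer address, length in
 * dwords and dword offset; the individual hardware fields are noted
 * inline below.
 */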
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
                                         u64 addr, u64 dwlen, u64 dwoffset)
{
        u8 tmpgen;

        tmpgen = ppd->sdma_generation;

        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
                           ((tmpgen & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
                           (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
                                              /* last */  /* dma head */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}

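/*
 * Write the two descriptor qwords for fragment idx of pkt into the
 * descriptor ring slot at index tail, flagging the packet's first and
 * last fragments.
 */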
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
                                    unsigned ofs, u16 tail)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &ppd->sdma_descq[tail].qw[0];

        descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
        if (idx == 0)
                descq0 = qib_sdma_make_first_desc0(descq0);
        if (idx == pkt->naddr - 1)
                descq0 = qib_sdma_make_last_desc0(descq0);

        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq,
                                   struct list_head *pktlist)
{
        struct qib_devdata *dd = ppd->dd;
        int ret = 0;
        unsigned long flags;
        u16 tail;
        u8 generation;
        u64 descq_added;

        if (list_empty(pktlist))
                return 0;

        if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
                return -ECOMM;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        /* keep a copy for restoring purposes in case of problems */
        generation = ppd->sdma_generation;
        descq_added = ppd->sdma_descq_added;

        if (unlikely(!__qib_sdma_running(ppd))) {
                ret = -ECOMM;
                goto unlock;
        }

        tail = ppd->sdma_descq_tail;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
                int i;
                unsigned ofs = 0;
                u16 dtail = tail;

                if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
                        goto unlock_check_tail;

                for (i = 0; i < pkt->naddr; i++) {
                        qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
                        ofs += pkt->addr[i].length >> 2;

                        if (++tail == ppd->sdma_descq_cnt) {
                                tail = 0;
                                ++ppd->sdma_generation;
                        }
                }

                if ((ofs << 2) > ppd->ibmaxlen) {
                        ret = -EMSGSIZE;
                        goto unlock;
                }

                /*
                 * If the packet is >= 2KB mtu equivalent, we have to use
                 * the large buffers, and have to mark each descriptor as
                 * part of a large buffer packet.
                 */
                if (ofs > dd->piosize2kmax_dwords) {
                        for (i = 0; i < pkt->naddr; i++) {
                                ppd->sdma_descq[dtail].qw[0] |=
                                        cpu_to_le64(1ULL << 14);
                                if (++dtail == ppd->sdma_descq_cnt)
                                        dtail = 0;
                        }
                }

                ppd->sdma_descq_added += pkt->naddr;
                pkt->added = ppd->sdma_descq_added;
                list_move_tail(&pkt->list, &pq->sent);
                ret++;
        }

unlock_check_tail:
        /* advance the tail on the chip if necessary */
        if (ppd->sdma_descq_tail != tail)
                dd->f_sdma_update_tail(ppd, tail);

unlock:
        if (unlikely(ret < 0)) {
                ppd->sdma_generation = generation;
                ppd->sdma_descq_added = descq_added;
        }
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}

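/*
 * Entry point for a user writev: build up to 8 packets at a time from
 * the iovec array, lazily reclaim completed descriptors, and push the
 * new packets onto the hardware SDMA queue.  Returns the number of
 * packets queued or a negative errno.
 */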
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim)
{
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        int ret = 0;
        struct list_head list;
        int npkts = 0;

        INIT_LIST_HEAD(&list);

        mutex_lock(&pq->lock);

        /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
        if (!qib_sdma_running(ppd))
                goto done_unlock;

        if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
        }

        while (dim) {
                const int mxp = 8;

                down_write(&current->mm->mmap_sem);
                ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
                up_write(&current->mm->mmap_sem);

                if (ret <= 0)
                        goto done_unlock;
                else {
                        dim -= ret;
                        iov += ret;
                }

                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
                         * Lazily clean the hw queue.  The 4 is a guess of
                         * about how many sdma descriptors a packet will
                         * take (it doesn't have to be perfect).
                         */
                        if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
                                qib_user_sdma_hwqueue_clean(ppd);
                                qib_user_sdma_queue_clean(ppd, pq);
                        }

                        ret = qib_user_sdma_push_pkts(ppd, pq, &list);
                        if (ret < 0)
                                goto done_unlock;
                        else {
                                npkts += ret;
                                pq->counter += ret;

                                if (!list_empty(&list))
                                        goto done_unlock;
                        }
                }
        }

done_unlock:
        if (!list_empty(&list))
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);

        return (ret < 0) ? ret : npkts;
}

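/* reclaim completed descriptors and sent packets; returns the number cleaned */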
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
                                struct qib_user_sdma_queue *pq)
{
        int ret = 0;

        mutex_lock(&pq->lock);
        qib_user_sdma_hwqueue_clean(ppd);
        ret = qib_user_sdma_queue_clean(ppd, pq);
        mutex_unlock(&pq->lock);

        return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
        return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
        return pq ? pq->counter : 0;
}