mlx4_core: Support ICM tables in coherent memory
drivers/net/mlx4/icm.c
/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18
};

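/*
 * Free a chunk allocated via alloc_pages(): tear down the streaming
 * DMA mapping (if the chunk was ever mapped) and release the pages.
 */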
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(chunk->mem[i].page,
                             get_order(chunk->mem[i].length));
}

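/*
 * Free a chunk allocated via dma_alloc_coherent(); the bus address
 * was stashed in each scatterlist entry at allocation time, so no
 * unmap step is needed.
 */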
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(chunk->mem[i].page),
                                  sg_dma_address(&chunk->mem[i]));
}

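/*
 * Free an entire ICM area, releasing each chunk through the same
 * (coherent or streaming) path it was allocated with.
 */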
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

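/* Fill one scatterlist entry with a fresh higher-order page allocation. */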
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        mem->page = alloc_pages(gfp_mask, order);
        if (!mem->page)
                return -ENOMEM;

        mem->length = PAGE_SIZE << order;
        mem->offset = 0;
        return 0;
}

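/*
 * Fill one scatterlist entry from dma_alloc_coherent(), recording
 * both the kernel virtual address (via sg_set_buf()) and the bus
 * address, so the chunk needs no later pci_map_sg() pass.
 */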
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                    int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

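/*
 * Allocate npages worth of ICM.  Pages are grouped into chunks of at
 * most MLX4_ICM_CHUNK_LEN scatterlist entries; each entry holds the
 * largest allocation order that still fits, falling back to smaller
 * orders when an allocation fails.  Non-coherent chunks get a
 * streaming DMA mapping once they fill up (or after the final page).
 */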
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return NULL;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;
                        else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;

                                chunk = NULL;
                        }

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}

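/* Map the chunks of an ICM area into device virtual address virt. */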
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
}

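/*
 * Map a single page of ICM: build a (virtual address, DMA address)
 * record in a command mailbox and hand it to the MAP_ICM command.
 */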
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
        struct mlx4_cmd_mailbox *mailbox;
        __be64 *inbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        inbox[0] = cpu_to_be64(virt);
        inbox[1] = cpu_to_be64(dma_addr);

        err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev, mailbox);

        if (!err)
                mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
                          (unsigned long long) dma_addr, (unsigned long long) virt);

        return err;
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}

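/*
 * Take a reference on the ICM chunk backing table entry obj,
 * allocating and mapping the chunk on first use.
 */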
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

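/*
 * Drop a reference on the ICM chunk backing table entry obj; the
 * chunk is unmapped and freed when the last reference goes away.
 */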
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

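/*
 * Return the kernel virtual address of table entry obj, or NULL for
 * tables that are not kept in lowmem or whose backing chunk is not
 * currently allocated.
 */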
void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
{
        int idx, offset, i;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        /* Work in byte offsets so we can compare against chunk lengths. */
        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (chunk->mem[i].length > offset) {
                                page = chunk->mem[i].page;
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}

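/* Take a reference on every ICM chunk covering entries start..end. */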
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         int start, int end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          int start, int end)
{
        int i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}

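/*
 * Initialize an ICM table: size the chunk array, record the table
 * geometry, and allocate, map and permanently reference the chunks
 * that hold the firmware's reserved objects.
 */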
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, int nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        return -ENOMEM;
}

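/* Unmap and free all remaining chunks, then free the chunk array itself. */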
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table->icm);
}