/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
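
/*
 * Illustrative sketch, not part of the original file: walking a possibly
 * chained scatterlist with sg_next().  The helper name dump_sg_pages is
 * hypothetical.  sg_next() transparently follows chain entries, so callers
 * never see the chain elements themselves.
 */
#if 0
static void dump_sg_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;

	for (sg = sgl, i = 0; sg && i < nents; sg = sg_next(sg), i++)
		printk(KERN_DEBUG "sg %u: page %p len %u off %u\n",
		       i, sg_page(sg), sg->length, sg->offset);
}
#endif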
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly, as it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;
#endif

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif

	return ret;
}
EXPORT_SYMBOL(sg_last);
/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
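
/*
 * Illustrative sketch, not part of the original file: the common pattern of
 * describing a single kmalloc'ed buffer for DMA.  The helper name and its
 * parameters are hypothetical, and dma_map_sg() assumes
 * <linux/dma-mapping.h>, which this file does not include.
 */
#if 0
static int submit_one_buffer(struct device *dev, void *buf, unsigned int len)
{
	struct scatterlist sg;

	/* buf must be a lowmem (e.g. kmalloc'ed) address, not on the stack */
	sg_init_one(&sg, buf, len);
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 0)
		return -EIO;
	/* ... run the transfer, then dma_unmap_sg() on completion ... */
	return 0;
}
#endif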
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function allocates and initializes a @table that is @nents entries
 *   long. The allocator is defined to return scatterlist chunks of maximum
 *   size @max_ents. Thus if @nents is bigger than @max_ents, the
 *   scatterlists will be chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
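
/*
 * Worked example (illustrative, not from the original source): with
 * max_ents == 128 (e.g. SG_MAX_SINGLE_ALLOC on a 4K-page system with
 * 32-byte scatterlist entries), a request for nents == 300 is built as
 *
 *	chunk 1: 128 entries allocated, 127 usable + 1 chain entry
 *	chunk 2: 128 entries allocated, 127 usable + 1 chain entry
 *	chunk 3:  46 entries allocated, all usable, last marked as end
 *
 * giving 127 + 127 + 46 == 300 usable entries across three allocations.
 */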
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
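
/*
 * Illustrative sketch, not part of the original file: allocating a table,
 * pointing its entries at pages and releasing it again.  The helper name
 * and the my_pages array are hypothetical.
 */
#if 0
static int build_table(struct sg_table *table, struct page **my_pages,
		       unsigned int npages)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, my_pages[i], PAGE_SIZE, 0);

	/* ... hand table->sgl to the consumer, then ... */
	sg_free_table(table);
	return 0;
}
#endif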
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
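
/*
 * Worked example (illustrative, not from the original source): four pages
 * with pfns {5, 6, 7, 10}, offset == 512 and size == 3 * PAGE_SIZE collapse
 * into two entries: entry 0 covers pfns 5-7 (length 3 * PAGE_SIZE - 512,
 * offset 512), entry 1 covers pfn 10 (the remaining 512 bytes, offset 0).
 */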
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary. __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
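
/*
 * Illustrative sketch, not part of the original file: zeroing every byte an
 * sg list describes via the mapping iterator.  The helper name is
 * hypothetical; SG_MITER_TO_SG is set because the sg list is written to.
 */
#if 0
static void zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);
}
#endif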
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
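
/*
 * Illustrative sketch, not part of the original file: bouncing an sg list
 * through a linear buffer, e.g. for a consumer that needs contiguous data.
 * All identifiers are hypothetical.
 */
#if 0
static size_t bounce_sg(struct scatterlist *sgl, unsigned int nents,
			void *bounce, size_t buflen)
{
	size_t copied;

	/* gather: sg list -> linear buffer */
	copied = sg_copy_to_buffer(sgl, nents, bounce, buflen);
	/* ... transform the data in place ... */
	/* scatter: linear buffer -> sg list */
	return sg_copy_from_buffer(sgl, nents, bounce, copied);
}
#endif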