/*
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
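
/*
 * Usage sketch: walking a scatterlist entry by entry with sg_next().
 * The example_* function and its argument are illustrative only, not
 * part of the kernel API.
 */
static void __maybe_unused example_walk_sgl(struct scatterlist *sgl)
{
	struct scatterlist *sg;

	/* sg_next() transparently follows chain entries */
	for (sg = sgl; sg; sg = sg_next(sg))
		pr_debug("entry: len=%u off=%u\n", sg->length, sg->offset);
}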
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns how many entries are in sg, taking chaining into account.
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);
/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in sg that are required to meet
 *   the supplied length, taking chaining into account as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
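
/*
 * Usage sketch: bounding an operation to the entries that cover the
 * first 4 KiB of a list. The example_* name and the 4 KiB figure are
 * hypothetical.
 */
static int __maybe_unused example_nents_for_4k(struct scatterlist *sgl)
{
	int nents = sg_nents_for_len(sgl, 4096);

	if (nents < 0)
		return nents;	/* list is shorter than 4 KiB */
	return nents;		/* entries needed to cover 4 KiB */
}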
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;

		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
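
/*
 * Usage sketch: describing one kmalloc'ed buffer with a single-entry
 * scatterlist. The example_* name and the 512-byte size are
 * hypothetical; the buffer must not live on the stack.
 */
static int __maybe_unused example_single_entry(void)
{
	struct scatterlist sg;
	void *buf = kmalloc(512, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	sg_init_one(&sg, buf, 512);	/* one entry, already marked as end */
	/* ... hand &sg to code expecting a one-entry sg list ... */
	kfree(buf);
	return 0;
}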
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);

		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else
			sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/* If no more entries after this one, mark the end */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
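
/*
 * Usage sketch: allocate a table, point its entries at freshly
 * allocated pages, then release everything. The example_* name and
 * the 4-entry size are hypothetical.
 */
static int __maybe_unused example_alloc_table(void)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, 4, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.nents, i) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page) {
			ret = -ENOMEM;
			goto out;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
	}

	/* ... dma_map_sg()/dma_unmap_sg() would go here ... */
out:
	/* sg_free_table() frees only the entries, so drop the pages first */
	for_each_sg(table.sgl, sg, table.nents, i)
		if (sg_page(sg))
			__free_pages(sg_page(sg), 0);
	sg_free_table(&table);
	return ret;
}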
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at a
 *   start and a size of valid data in a buffer specified by the page array.
 *   The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
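
/*
 * Usage sketch: build a table over an already-allocated page array;
 * physically contiguous neighbours collapse into one entry. The
 * example_* name is hypothetical and the pages are assumed valid.
 */
static int __maybe_unused example_table_from_pages(struct page **pages,
						   unsigned int n_pages)
{
	struct sg_table sgt;
	int ret;

	/* whole pages, no leading offset */
	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
					(unsigned long)n_pages * PAGE_SIZE,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* ... map with dma_map_sg(), do I/O ... */

	sg_free_table(&sgt);	/* the pages themselves stay allocated */
	return 0;
}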
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);
/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
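
/*
 * Usage sketch: a 64 KiB scatterlist whose backing pages are allocated
 * and freed by the sgl_* helpers. The example_* name and size are
 * hypothetical.
 */
static int __maybe_unused example_sgl_alloc(void)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = sgl_alloc(64 * 1024, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... nents entries, each backed by an order-0 page ... */

	sgl_free(sgl);	/* frees both the pages and the list itself */
	return 0;
}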
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid that a
 *   page would get freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high enough value.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);
/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);
/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);
static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
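
/*
 * Usage sketch: visiting every page covered by a table with the
 * for_each_sg_page() wrapper around the iterator above. The example_*
 * name is hypothetical.
 */
static void __maybe_unused example_page_iter(struct sg_table *sgt)
{
	struct sg_page_iter piter;

	for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		pr_debug("pfn %lu\n", page_to_pfn(page));
	}
}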
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				     (pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to skip forward from the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
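
/*
 * Usage sketch: summing the bytes of a list through the mapping
 * iterator. SG_MITER_FROM_SG because we only read; SG_MITER_ATOMIC so
 * the loop may run in atomic context. The example_* name is
 * hypothetical.
 */
static unsigned long __maybe_unused example_miter_sum(struct scatterlist *sgl,
						      unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long sum = 0;
	size_t i;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		const u8 *p = miter.addr;

		for (i = 0; i < miter.length; i++)
			sum += p[i];
	}
	sg_miter_stop(&miter);	/* drops the last kmap_atomic() */
	return sum;
}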
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 * @to_buffer:	transfer direction (true == from an sg list to a
 *		buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
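
/*
 * Usage sketch: round-tripping a small buffer through a single-entry
 * scatterlist with the copy helpers. The example_* name and sizes are
 * hypothetical.
 */
static int __maybe_unused example_copy_roundtrip(void)
{
	u8 src[16] = { 1, 2, 3 }, dst[16] = { 0 };
	void *backing = kmalloc(sizeof(src), GFP_KERNEL);
	struct scatterlist sg;
	int ret = 0;

	if (!backing)
		return -ENOMEM;

	sg_init_one(&sg, backing, sizeof(src));
	if (sg_copy_from_buffer(&sg, 1, src, sizeof(src)) != sizeof(src) ||
	    sg_copy_to_buffer(&sg, 1, dst, sizeof(dst)) != sizeof(dst) ||
	    memcmp(src, dst, sizeof(src)))
		ret = -EIO;

	kfree(backing);
	return ret;
}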
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buflen:	The number of bytes to zero out
 * @skip:	Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);
		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
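
/*
 * Usage sketch: clearing bytes 8..23 of whatever buffer a single-entry
 * list describes. The example_* name and the offsets are hypothetical;
 * the list is assumed to cover at least 24 bytes.
 */
static void __maybe_unused example_zero_range(struct scatterlist *sgl)
{
	size_t zeroed = sg_zero_buffer(sgl, 1, 16, 8);

	WARN_ON(zeroed != 16);
}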