/*
 * Copyright (C) 2007 Jens Axboe <[email protected]>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
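
/*
 * Example (illustrative sketch, not part of the original file): walking a
 * possibly-chained list with sg_next() by hand; most callers use the
 * for_each_sg() helper from <linux/scatterlist.h>, which does the same.
 * pr_debug() is assumed to be available via the usual kernel headers.
 */
static inline void sg_next_example(struct scatterlist *sgl)
{
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg))
		pr_debug("segment: offset=%u length=%u\n",
			 sg->offset, sg->length);
}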

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg: The scatterlist
 * @len: The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
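
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that only needs the first @len bytes of a request can size a partial
 * mapping with sg_nents_for_len() and fail cleanly on a short list.
 */
static inline int sg_nents_for_len_example(struct scatterlist *sgl, u64 len)
{
	int nents = sg_nents_for_len(sgl, len);

	if (nents < 0)		/* -EINVAL: list holds fewer than @len bytes */
		return nents;

	/* ... map or copy only the first @nents entries ... */
	return nents;
}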

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
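
/*
 * Example (illustrative sketch, not part of the original file): describing
 * a freshly allocated linear buffer with a one-entry list; sg_virt() is
 * used here only to show that the entry round-trips to the same address.
 */
static inline int sg_init_one_example(unsigned int len)
{
	struct scatterlist sg;
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	WARN_ON(sg_virt(&sg) != buf);

	kfree(buf);
	return 0;
}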

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: first scatterlist chunk preallocated by the caller, or NULL
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function allocates a @table that is @nents entries long. The
 *   allocator is defined to return scatterlist chunks of maximum size
 *   @max_ents. Thus if @nents is bigger than @max_ents, the scatterlists
 *   will be chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
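
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * a table with one entry per page, filling it, and releasing it. The
 * @pages array is a hypothetical caller-supplied argument.
 */
static inline int sg_alloc_table_example(struct page **pages,
					 unsigned int n_pages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n_pages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table.sgl / table.nents to the consumer ... */

	sg_free_table(&table);
	return 0;
}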

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. The
 *   caller may provide an offset into the first page and the size of the
 *   valid data in the buffer described by the page array. The returned sg
 *   table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
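
/*
 * Example (illustrative sketch, not part of the original file): when the
 * page array happens to be physically contiguous, the whole buffer
 * collapses into a single entry, so sgt->orig_nents can end up much
 * smaller than n_pages.
 */
static inline int sg_from_pages_example(struct sg_table *sgt,
					struct page **pages,
					unsigned int n_pages)
{
	return sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
					 n_pages * PAGE_SIZE, GFP_KERNEL);
}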

/* Initialize a page iterator over @sglist, starting @pgoffset pages in. */
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

/* Advance to the next page, crossing sg entries as needed; false at end. */
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
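
/*
 * Example (illustrative sketch, not part of the original file): visiting
 * every page covered by a list via the for_each_sg_page() helper, which
 * drives __sg_page_iter_start()/__sg_page_iter_next() internally.
 */
static inline unsigned int sg_page_iter_example(struct scatterlist *sgl,
						unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* e.g. flush or dirty the page here */
		n++;
	}
	return n;
}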

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags controlling the iteration
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to skip past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has already been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset are adjusted by sg_miter_stop.
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be
 *   resumed by calling sg_miter_next() on it. This is useful when
 *   resources (kmap) need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
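
/*
 * Example (illustrative sketch, not part of the original file): zero-filling
 * an sg list through the mapping iterator. With SG_MITER_ATOMIC the pages
 * are mapped with kmap_atomic(), so nothing may sleep between _next() and
 * _stop().
 */
static inline size_t sg_miter_example(struct scatterlist *sgl,
				      unsigned int nents)
{
	struct sg_mapping_iter miter;
	size_t cleared = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
	while (sg_miter_next(&miter)) {
		memset(miter.addr, 0, miter.length);
		/* miter.consumed defaults to miter.length; lower it to
		 * report partial consumption */
		cleared += miter.length;
	}
	sg_miter_stop(&miter);
	return cleared;
}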

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, off_t skip,
			     bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
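
/*
 * Example (illustrative sketch, not part of the original file): peeking at
 * a 16-byte header that starts 512 bytes into an sg list and may straddle
 * entry boundaries; the copy helpers hide the page-crossing details.
 */
static inline size_t sg_pcopy_example(struct scatterlist *sgl,
				      unsigned int nents, void *hdr)
{
	return sg_pcopy_to_buffer(sgl, nents, hdr, 16, 512);
}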