Commit | Line | Data |
---|---|---|
5274f052 JA |
1 | /* |
2 | * "splice": joining two ropes together by interweaving their strands. | |
3 | * | |
4 | * This is the "extended pipe" functionality, where a pipe is used as | |
5 | * an arbitrary in-memory buffer. Think of a pipe as a small kernel | |
6 | * buffer that you can use to transfer data from one end to the other. | |
7 | * | |
8 | * The traditional unix read/write is extended with a "splice()" operation | |
9 | * that transfers data buffers to or from a pipe buffer. | |
10 | * | |
11 | * Named by Larry McVoy, original implementation from Linus, extended by | |
c2058e06 JA |
12 | * Jens to support splicing to files, network, direct splicing, etc. and |
13 | * fixing lots of bugs. | |
5274f052 | 14 | * |
0fe23479 | 15 | * Copyright (C) 2005-2006 Jens Axboe <[email protected]> |
c2058e06 JA |
16 | * Copyright (C) 2005-2006 Linus Torvalds <[email protected]> |
17 | * Copyright (C) 2006 Ingo Molnar <[email protected]> | |
5274f052 JA |
18 | * |
19 | */ | |
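
As a hedged illustration of the "extended pipe" model sketched in the header comment, the following userspace snippet copies one file to another through a pipe with splice(2); the file names, chunk size, and thin error handling are placeholders, not part of this source.

```c
/*
 * Sketch: copy argv[1] to argv[2] by splicing file -> pipe -> file,
 * never touching the data in user memory. Illustrative only.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	int pfd[2], in, out;
	long n;

	if (argc < 3)
		return 1;
	in = open(argv[1], O_RDONLY);
	out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || pipe(pfd) < 0)
		return 1;

	for (;;) {
		/* Pull file data into the pipe's kernel buffer. */
		n = splice(in, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
		if (n <= 0)
			break;
		/* Push the same pages from the pipe out to the destination. */
		while (n > 0) {
			long done = splice(pfd[0], NULL, out, NULL, n,
					   SPLICE_F_MOVE | SPLICE_F_MORE);
			if (done <= 0)
				return 1;
			n -= done;
		}
	}
	return n < 0;
}
```
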
20 | #include <linux/fs.h> | |
21 | #include <linux/file.h> | |
22 | #include <linux/pagemap.h> | |
d6b29d7c | 23 | #include <linux/splice.h> |
5274f052 | 24 | #include <linux/mm_inline.h> |
5abc97aa | 25 | #include <linux/swap.h> |
4f6f0bd2 JA |
26 | #include <linux/writeback.h> |
27 | #include <linux/buffer_head.h> | |
a0f06780 | 28 | #include <linux/module.h> |
4f6f0bd2 | 29 | #include <linux/syscalls.h> |
912d35f8 | 30 | #include <linux/uio.h> |
29ce2058 | 31 | #include <linux/security.h> |
5274f052 | 32 | |
83f9135b JA |
33 | /* |
34 | * Attempt to steal a page from a pipe buffer. This should perhaps go into | |
35 | * a vm helper function, it's already simplified quite a bit by the | |
36 | * addition of remove_mapping(). If success is returned, the caller may | |
37 | * attempt to reuse this page for another destination. | |
38 | */ | |
76ad4d11 | 39 | static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, |
5abc97aa JA |
40 | struct pipe_buffer *buf) |
41 | { | |
42 | struct page *page = buf->page; | |
9e94cd4f | 43 | struct address_space *mapping; |
5abc97aa | 44 | |
9e0267c2 JA |
45 | lock_page(page); |
46 | ||
9e94cd4f JA |
47 | mapping = page_mapping(page); |
48 | if (mapping) { | |
49 | WARN_ON(!PageUptodate(page)); | |
5abc97aa | 50 | |
9e94cd4f JA |
51 | /* |
52 | * At least for ext2 with nobh option, we need to wait on | |
53 | * writeback completing on this page, since we'll remove it | |
54 | * from the pagecache. Otherwise truncate won't wait on the | |
55 | * page, allowing the disk blocks to be reused by someone else | |
56 | * before we actually wrote our data to them. fs corruption | |
57 | * ensues. | |
58 | */ | |
59 | wait_on_page_writeback(page); | |
ad8d6f0a | 60 | |
9e94cd4f | 61 | if (PagePrivate(page)) |
2ae88149 | 62 | try_to_release_page(page, GFP_KERNEL); |
4f6f0bd2 | 63 | |
9e94cd4f JA |
64 | /* |
65 | * If we succeeded in removing the mapping, set LRU flag | |
66 | * and return good. | |
67 | */ | |
68 | if (remove_mapping(mapping, page)) { | |
69 | buf->flags |= PIPE_BUF_FLAG_LRU; | |
70 | return 0; | |
71 | } | |
9e0267c2 | 72 | } |
5abc97aa | 73 | |
9e94cd4f JA |
74 | /* |
75 | * Raced with truncate or failed to remove page from current | |
76 | * address space, unlock and return failure. | |
77 | */ | |
78 | unlock_page(page); | |
79 | return 1; | |
5abc97aa JA |
80 | } |
81 | ||
76ad4d11 | 82 | static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, |
5274f052 JA |
83 | struct pipe_buffer *buf) |
84 | { | |
85 | page_cache_release(buf->page); | |
1432873a | 86 | buf->flags &= ~PIPE_BUF_FLAG_LRU; |
5274f052 JA |
87 | } |
88 | ||
0845718d JA |
89 | /* |
90 | * Check whether the contents of buf are OK to access. Since the content |
91 | * is a page cache page, IO may be in flight. | |
92 | */ | |
cac36bb0 JA |
93 | static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe, |
94 | struct pipe_buffer *buf) | |
5274f052 JA |
95 | { |
96 | struct page *page = buf->page; | |
49d0b21b | 97 | int err; |
5274f052 JA |
98 | |
99 | if (!PageUptodate(page)) { | |
49d0b21b JA |
100 | lock_page(page); |
101 | ||
102 | /* | |
103 | * Page got truncated/unhashed. This will cause a 0-byte | |
73d62d83 | 104 | * splice, if this is the first page. |
49d0b21b JA |
105 | */ |
106 | if (!page->mapping) { | |
107 | err = -ENODATA; | |
108 | goto error; | |
109 | } | |
5274f052 | 110 | |
49d0b21b | 111 | /* |
73d62d83 | 112 | * Uh oh, read-error from disk. |
49d0b21b JA |
113 | */ |
114 | if (!PageUptodate(page)) { | |
115 | err = -EIO; | |
116 | goto error; | |
117 | } | |
118 | ||
119 | /* | |
f84d7519 | 120 | * Page is ok after all, we are done. |
49d0b21b | 121 | */ |
5274f052 | 122 | unlock_page(page); |
5274f052 JA |
123 | } |
124 | ||
f84d7519 | 125 | return 0; |
49d0b21b JA |
126 | error: |
127 | unlock_page(page); | |
f84d7519 | 128 | return err; |
70524490 JA |
129 | } |
130 | ||
d4c3cca9 | 131 | static const struct pipe_buf_operations page_cache_pipe_buf_ops = { |
5274f052 | 132 | .can_merge = 0, |
f84d7519 JA |
133 | .map = generic_pipe_buf_map, |
134 | .unmap = generic_pipe_buf_unmap, | |
cac36bb0 | 135 | .confirm = page_cache_pipe_buf_confirm, |
5274f052 | 136 | .release = page_cache_pipe_buf_release, |
5abc97aa | 137 | .steal = page_cache_pipe_buf_steal, |
f84d7519 | 138 | .get = generic_pipe_buf_get, |
5274f052 JA |
139 | }; |
140 | ||
912d35f8 JA |
141 | static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, |
142 | struct pipe_buffer *buf) | |
143 | { | |
7afa6fd0 JA |
144 | if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) |
145 | return 1; | |
146 | ||
1432873a | 147 | buf->flags |= PIPE_BUF_FLAG_LRU; |
330ab716 | 148 | return generic_pipe_buf_steal(pipe, buf); |
912d35f8 JA |
149 | } |
150 | ||
d4c3cca9 | 151 | static const struct pipe_buf_operations user_page_pipe_buf_ops = { |
912d35f8 | 152 | .can_merge = 0, |
f84d7519 JA |
153 | .map = generic_pipe_buf_map, |
154 | .unmap = generic_pipe_buf_unmap, | |
cac36bb0 | 155 | .confirm = generic_pipe_buf_confirm, |
912d35f8 JA |
156 | .release = page_cache_pipe_buf_release, |
157 | .steal = user_page_pipe_buf_steal, | |
f84d7519 | 158 | .get = generic_pipe_buf_get, |
912d35f8 JA |
159 | }; |
160 | ||
932cc6d4 JA |
161 | /** |
162 | * splice_to_pipe - fill passed data into a pipe | |
163 | * @pipe: pipe to fill | |
164 | * @spd: data to fill | |
165 | * | |
166 | * Description: | |
79685b8d | 167 | * @spd contains a map of pages and len/offset tuples, along with |
932cc6d4 JA |
168 | * the struct pipe_buf_operations associated with these pages. This |
169 | * function will link that data to the pipe. | |
170 | * | |
83f9135b | 171 | */ |
d6b29d7c JA |
172 | ssize_t splice_to_pipe(struct pipe_inode_info *pipe, |
173 | struct splice_pipe_desc *spd) | |
5274f052 | 174 | { |
00de00bd | 175 | unsigned int spd_pages = spd->nr_pages; |
912d35f8 | 176 | int ret, do_wakeup, page_nr; |
5274f052 JA |
177 | |
178 | ret = 0; | |
179 | do_wakeup = 0; | |
912d35f8 | 180 | page_nr = 0; |
5274f052 | 181 | |
3a326a2c IM |
182 | if (pipe->inode) |
183 | mutex_lock(&pipe->inode->i_mutex); | |
5274f052 | 184 | |
5274f052 | 185 | for (;;) { |
3a326a2c | 186 | if (!pipe->readers) { |
5274f052 JA |
187 | send_sig(SIGPIPE, current, 0); |
188 | if (!ret) | |
189 | ret = -EPIPE; | |
190 | break; | |
191 | } | |
192 | ||
6f767b04 JA |
193 | if (pipe->nrbufs < PIPE_BUFFERS) { |
194 | int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1); | |
3a326a2c | 195 | struct pipe_buffer *buf = pipe->bufs + newbuf; |
5274f052 | 196 | |
912d35f8 JA |
197 | buf->page = spd->pages[page_nr]; |
198 | buf->offset = spd->partial[page_nr].offset; | |
199 | buf->len = spd->partial[page_nr].len; | |
497f9625 | 200 | buf->private = spd->partial[page_nr].private; |
912d35f8 | 201 | buf->ops = spd->ops; |
7afa6fd0 JA |
202 | if (spd->flags & SPLICE_F_GIFT) |
203 | buf->flags |= PIPE_BUF_FLAG_GIFT; | |
204 | ||
6f767b04 | 205 | pipe->nrbufs++; |
912d35f8 JA |
206 | page_nr++; |
207 | ret += buf->len; | |
208 | ||
6f767b04 JA |
209 | if (pipe->inode) |
210 | do_wakeup = 1; | |
5274f052 | 211 | |
912d35f8 | 212 | if (!--spd->nr_pages) |
5274f052 | 213 | break; |
6f767b04 | 214 | if (pipe->nrbufs < PIPE_BUFFERS) |
5274f052 JA |
215 | continue; |
216 | ||
217 | break; | |
218 | } | |
219 | ||
912d35f8 | 220 | if (spd->flags & SPLICE_F_NONBLOCK) { |
29e35094 LT |
221 | if (!ret) |
222 | ret = -EAGAIN; | |
223 | break; | |
224 | } | |
225 | ||
5274f052 JA |
226 | if (signal_pending(current)) { |
227 | if (!ret) | |
228 | ret = -ERESTARTSYS; | |
229 | break; | |
230 | } | |
231 | ||
232 | if (do_wakeup) { | |
c0bd1f65 | 233 | smp_mb(); |
3a326a2c IM |
234 | if (waitqueue_active(&pipe->wait)) |
235 | wake_up_interruptible_sync(&pipe->wait); | |
236 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | |
5274f052 JA |
237 | do_wakeup = 0; |
238 | } | |
239 | ||
3a326a2c IM |
240 | pipe->waiting_writers++; |
241 | pipe_wait(pipe); | |
242 | pipe->waiting_writers--; | |
5274f052 JA |
243 | } |
244 | ||
02676e5a | 245 | if (pipe->inode) { |
3a326a2c | 246 | mutex_unlock(&pipe->inode->i_mutex); |
5274f052 | 247 | |
02676e5a JA |
248 | if (do_wakeup) { |
249 | smp_mb(); | |
250 | if (waitqueue_active(&pipe->wait)) | |
251 | wake_up_interruptible(&pipe->wait); | |
252 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | |
253 | } | |
5274f052 JA |
254 | } |
255 | ||
00de00bd | 256 | while (page_nr < spd_pages) |
912d35f8 | 257 | page_cache_release(spd->pages[page_nr++]); |
5274f052 JA |
258 | |
259 | return ret; | |
260 | } | |
261 | ||
3a326a2c | 262 | static int |
cbb7e577 JA |
263 | __generic_file_splice_read(struct file *in, loff_t *ppos, |
264 | struct pipe_inode_info *pipe, size_t len, | |
265 | unsigned int flags) | |
5274f052 JA |
266 | { |
267 | struct address_space *mapping = in->f_mapping; | |
d8983910 | 268 | unsigned int loff, nr_pages, req_pages; |
16c523dd | 269 | struct page *pages[PIPE_BUFFERS]; |
912d35f8 | 270 | struct partial_page partial[PIPE_BUFFERS]; |
5274f052 | 271 | struct page *page; |
91ad66ef JA |
272 | pgoff_t index, end_index; |
273 | loff_t isize; | |
eb20796b | 274 | int error, page_nr; |
912d35f8 JA |
275 | struct splice_pipe_desc spd = { |
276 | .pages = pages, | |
277 | .partial = partial, | |
278 | .flags = flags, | |
279 | .ops = &page_cache_pipe_buf_ops, | |
280 | }; | |
5274f052 | 281 | |
cbb7e577 | 282 | index = *ppos >> PAGE_CACHE_SHIFT; |
912d35f8 | 283 | loff = *ppos & ~PAGE_CACHE_MASK; |
d8983910 FW |
284 | req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
285 | nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS); | |
5274f052 | 286 | |
eb20796b JA |
287 | /* |
288 | * Lookup the (hopefully) full range of pages we need. | |
289 | */ | |
290 | spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages); | |
431a4820 | 291 | index += spd.nr_pages; |
82aa5d61 | 292 | |
eb20796b JA |
293 | /* |
294 | * If find_get_pages_contig() returned fewer pages than we needed, | |
431a4820 | 295 | * readahead/allocate the rest and fill in the holes. |
eb20796b | 296 | */ |
431a4820 | 297 | if (spd.nr_pages < nr_pages) |
cf914a7d RR |
298 | page_cache_sync_readahead(mapping, &in->f_ra, in, |
299 | index, req_pages - spd.nr_pages); | |
431a4820 | 300 | |
932cc6d4 | 301 | error = 0; |
eb20796b | 302 | while (spd.nr_pages < nr_pages) { |
82aa5d61 | 303 | /* |
eb20796b JA |
304 | * Page could be there, find_get_pages_contig() breaks on |
305 | * the first hole. | |
5274f052 | 306 | */ |
7480a904 JA |
307 | page = find_get_page(mapping, index); |
308 | if (!page) { | |
7480a904 | 309 | /* |
eb20796b | 310 | * page didn't exist, allocate one. |
7480a904 JA |
311 | */ |
312 | page = page_cache_alloc_cold(mapping); | |
313 | if (!page) | |
314 | break; | |
315 | ||
316 | error = add_to_page_cache_lru(page, mapping, index, | |
2ae88149 | 317 | GFP_KERNEL); |
7480a904 JA |
318 | if (unlikely(error)) { |
319 | page_cache_release(page); | |
a0548871 JA |
320 | if (error == -EEXIST) |
321 | continue; | |
7480a904 JA |
322 | break; |
323 | } | |
eb20796b JA |
324 | /* |
325 | * add_to_page_cache() locks the page, unlock it | |
326 | * to avoid convoluting the logic below even more. | |
327 | */ | |
328 | unlock_page(page); | |
7480a904 JA |
329 | } |
330 | ||
eb20796b JA |
331 | pages[spd.nr_pages++] = page; |
332 | index++; | |
333 | } | |
334 | ||
335 | /* | |
336 | * Now loop over the map and see if we need to start IO on any | |
337 | * pages, fill in the partial map, etc. | |
338 | */ | |
339 | index = *ppos >> PAGE_CACHE_SHIFT; | |
340 | nr_pages = spd.nr_pages; | |
341 | spd.nr_pages = 0; | |
342 | for (page_nr = 0; page_nr < nr_pages; page_nr++) { | |
343 | unsigned int this_len; | |
344 | ||
345 | if (!len) | |
346 | break; | |
347 | ||
348 | /* | |
349 | * this_len is the max we'll use from this page | |
350 | */ | |
351 | this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); | |
352 | page = pages[page_nr]; | |
353 | ||
a08a166f | 354 | if (PageReadahead(page)) |
cf914a7d | 355 | page_cache_async_readahead(mapping, &in->f_ra, in, |
d8983910 | 356 | page, index, req_pages - page_nr); |
a08a166f | 357 | |
7480a904 JA |
358 | /* |
359 | * If the page isn't uptodate, we may need to start io on it | |
360 | */ | |
361 | if (!PageUptodate(page)) { | |
c4f895cb JA |
362 | /* |
363 | * If in nonblock mode then don't block on waiting |
364 | * for an in-flight io page | |
365 | */ | |
9ae9d68c FW |
366 | if (flags & SPLICE_F_NONBLOCK) { |
367 | if (TestSetPageLocked(page)) | |
368 | break; | |
369 | } else | |
370 | lock_page(page); | |
7480a904 JA |
371 | |
372 | /* | |
373 | * page was truncated, stop here. if this isn't the | |
374 | * first page, we'll just complete what we already | |
375 | * added | |
376 | */ | |
377 | if (!page->mapping) { | |
378 | unlock_page(page); | |
7480a904 JA |
379 | break; |
380 | } | |
381 | /* | |
382 | * page was already under io and is now done, great | |
383 | */ | |
384 | if (PageUptodate(page)) { | |
385 | unlock_page(page); | |
386 | goto fill_it; | |
387 | } | |
5274f052 | 388 | |
7480a904 JA |
389 | /* |
390 | * need to read in the page | |
391 | */ | |
392 | error = mapping->a_ops->readpage(in, page); | |
5274f052 | 393 | if (unlikely(error)) { |
eb20796b JA |
394 | /* |
395 | * We really should re-lookup the page here, | |
396 | * but it complicates things a lot. Instead | |
397 | * let's just do what we already stored, and |
398 | * we'll get it the next time we are called. | |
399 | */ | |
7480a904 | 400 | if (error == AOP_TRUNCATED_PAGE) |
eb20796b JA |
401 | error = 0; |
402 | ||
5274f052 JA |
403 | break; |
404 | } | |
620a324b JA |
405 | } |
406 | fill_it: | |
407 | /* | |
408 | * i_size must be checked after PageUptodate. | |
409 | */ | |
410 | isize = i_size_read(mapping->host); | |
411 | end_index = (isize - 1) >> PAGE_CACHE_SHIFT; | |
412 | if (unlikely(!isize || index > end_index)) | |
413 | break; | |
414 | ||
415 | /* | |
416 | * if this is the last page, see if we need to shrink | |
417 | * the length and stop | |
418 | */ | |
419 | if (end_index == index) { | |
420 | unsigned int plen; | |
91ad66ef JA |
421 | |
422 | /* | |
620a324b | 423 | * max good bytes in this page |
91ad66ef | 424 | */ |
620a324b JA |
425 | plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; |
426 | if (plen <= loff) | |
91ad66ef | 427 | break; |
91ad66ef JA |
428 | |
429 | /* | |
620a324b | 430 | * force quit after adding this page |
91ad66ef | 431 | */ |
620a324b JA |
432 | this_len = min(this_len, plen - loff); |
433 | len = this_len; | |
5274f052 | 434 | } |
620a324b | 435 | |
eb20796b JA |
436 | partial[page_nr].offset = loff; |
437 | partial[page_nr].len = this_len; | |
82aa5d61 | 438 | len -= this_len; |
91ad66ef | 439 | loff = 0; |
eb20796b JA |
440 | spd.nr_pages++; |
441 | index++; | |
5274f052 JA |
442 | } |
443 | ||
eb20796b | 444 | /* |
475ecade | 445 | * Release any pages at the end, if we quit early. 'page_nr' is how far |
eb20796b JA |
446 | * we got, 'nr_pages' is how many pages are in the map. |
447 | */ | |
448 | while (page_nr < nr_pages) | |
449 | page_cache_release(pages[page_nr++]); | |
f4e6b498 | 450 | in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; |
eb20796b | 451 | |
912d35f8 | 452 | if (spd.nr_pages) |
00522fb4 | 453 | return splice_to_pipe(pipe, &spd); |
5274f052 | 454 | |
7480a904 | 455 | return error; |
5274f052 JA |
456 | } |
457 | ||
83f9135b JA |
458 | /** |
459 | * generic_file_splice_read - splice data from file to a pipe | |
460 | * @in: file to splice from | |
932cc6d4 | 461 | * @ppos: position in @in |
83f9135b JA |
462 | * @pipe: pipe to splice to |
463 | * @len: number of bytes to splice | |
464 | * @flags: splice modifier flags | |
465 | * | |
932cc6d4 JA |
466 | * Description: |
467 | * Will read pages from given file and fill them into a pipe. Can be | |
468 | * used as long as the address_space operations for the source implement |
469 | * a readpage() hook. | |
470 | * | |
83f9135b | 471 | */ |
cbb7e577 JA |
472 | ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, |
473 | struct pipe_inode_info *pipe, size_t len, | |
474 | unsigned int flags) | |
5274f052 JA |
475 | { |
476 | ssize_t spliced; | |
477 | int ret; | |
d366d398 JA |
478 | loff_t isize, left; |
479 | ||
480 | isize = i_size_read(in->f_mapping->host); | |
481 | if (unlikely(*ppos >= isize)) | |
482 | return 0; | |
483 | ||
484 | left = isize - *ppos; | |
485 | if (unlikely(left < len)) | |
486 | len = left; | |
5274f052 JA |
487 | |
488 | ret = 0; | |
489 | spliced = 0; | |
51a92c0f | 490 | while (len && !spliced) { |
cbb7e577 | 491 | ret = __generic_file_splice_read(in, ppos, pipe, len, flags); |
5274f052 | 492 | |
c4f895cb | 493 | if (ret < 0) |
5274f052 | 494 | break; |
c4f895cb JA |
495 | else if (!ret) { |
496 | if (spliced) | |
497 | break; | |
498 | if (flags & SPLICE_F_NONBLOCK) { | |
499 | ret = -EAGAIN; | |
500 | break; | |
501 | } | |
502 | } | |
5274f052 | 503 | |
cbb7e577 | 504 | *ppos += ret; |
5274f052 JA |
505 | len -= ret; |
506 | spliced += ret; | |
507 | } | |
508 | ||
509 | if (spliced) | |
510 | return spliced; | |
511 | ||
512 | return ret; | |
513 | } | |
514 | ||
059a8f37 JA |
515 | EXPORT_SYMBOL(generic_file_splice_read); |
516 | ||
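
As a hedged aside, a filesystem that relies on the generic page cache paths would typically wire these splice helpers into its file_operations roughly as below; "myfs" and the exact set of fields are illustrative placeholders, not taken from this file, and the snippet assumes the usual <linux/fs.h> context.

```c
/* Sketch of how a page-cache-backed filesystem hooks up splice support. */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};
```
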
5274f052 | 517 | /* |
4f6f0bd2 | 518 | * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' |
016b661e | 519 | * using sendpage(). Return the number of bytes sent. |
5274f052 | 520 | */ |
76ad4d11 | 521 | static int pipe_to_sendpage(struct pipe_inode_info *pipe, |
5274f052 JA |
522 | struct pipe_buffer *buf, struct splice_desc *sd) |
523 | { | |
6a14b90b | 524 | struct file *file = sd->u.file; |
5274f052 | 525 | loff_t pos = sd->pos; |
f84d7519 | 526 | int ret, more; |
5274f052 | 527 | |
cac36bb0 | 528 | ret = buf->ops->confirm(pipe, buf); |
f84d7519 JA |
529 | if (!ret) { |
530 | more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; | |
5274f052 | 531 | |
f84d7519 JA |
532 | ret = file->f_op->sendpage(file, buf->page, buf->offset, |
533 | sd->len, &pos, more); | |
534 | } | |
5274f052 | 535 | |
016b661e | 536 | return ret; |
5274f052 JA |
537 | } |
538 | ||
539 | /* | |
540 | * This is a little more tricky than the file -> pipe splicing. There are | |
541 | * basically three cases: | |
542 | * | |
543 | * - Destination page already exists in the address space and there | |
544 | * are users of it. For that case we have no other option than |
545 | * copying the data. Tough luck. | |
546 | * - Destination page already exists in the address space, but there | |
547 | * are no users of it. Make sure it's uptodate, then drop it. Fall | |
548 | * through to last case. | |
549 | * - Destination page does not exist, we can add the pipe page to | |
550 | * the page cache and avoid the copy. | |
551 | * | |
83f9135b JA |
552 | * If asked to move pages to the output file (SPLICE_F_MOVE is set in |
553 | * sd->flags), we attempt to migrate pages from the pipe to the output | |
554 | * file address space page cache. This is possible if no one else has | |
555 | * the pipe page referenced outside of the pipe and page cache. If | |
556 | * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create | |
557 | * a new page in the output file page cache and fill/dirty that. | |
5274f052 | 558 | */ |
76ad4d11 | 559 | static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, |
5274f052 JA |
560 | struct splice_desc *sd) |
561 | { | |
6a14b90b | 562 | struct file *file = sd->u.file; |
5274f052 | 563 | struct address_space *mapping = file->f_mapping; |
016b661e | 564 | unsigned int offset, this_len; |
5274f052 | 565 | struct page *page; |
afddba49 | 566 | void *fsdata; |
3e7ee3e7 | 567 | int ret; |
5274f052 JA |
568 | |
569 | /* | |
49d0b21b | 570 | * make sure the data in this buffer is uptodate |
5274f052 | 571 | */ |
cac36bb0 | 572 | ret = buf->ops->confirm(pipe, buf); |
f84d7519 JA |
573 | if (unlikely(ret)) |
574 | return ret; | |
5274f052 | 575 | |
5274f052 JA |
576 | offset = sd->pos & ~PAGE_CACHE_MASK; |
577 | ||
016b661e JA |
578 | this_len = sd->len; |
579 | if (this_len + offset > PAGE_CACHE_SIZE) | |
580 | this_len = PAGE_CACHE_SIZE - offset; | |
581 | ||
afddba49 NP |
582 | ret = pagecache_write_begin(file, mapping, sd->pos, this_len, |
583 | AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); | |
584 | if (unlikely(ret)) | |
585 | goto out; | |
5274f052 | 586 | |
0568b409 | 587 | if (buf->page != page) { |
f84d7519 JA |
588 | /* |
589 | * Careful, ->map() uses KM_USER0! | |
590 | */ | |
76ad4d11 | 591 | char *src = buf->ops->map(pipe, buf, 1); |
f84d7519 | 592 | char *dst = kmap_atomic(page, KM_USER1); |
5abc97aa | 593 | |
016b661e | 594 | memcpy(dst + offset, src + buf->offset, this_len); |
5abc97aa | 595 | flush_dcache_page(page); |
f84d7519 | 596 | kunmap_atomic(dst, KM_USER1); |
76ad4d11 | 597 | buf->ops->unmap(pipe, buf, src); |
5abc97aa | 598 | } |
afddba49 NP |
599 | ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, |
600 | page, fsdata); | |
5274f052 | 601 | out: |
5274f052 JA |
602 | return ret; |
603 | } | |
604 | ||
932cc6d4 JA |
605 | /** |
606 | * __splice_from_pipe - splice data from a pipe to given actor | |
607 | * @pipe: pipe to splice from | |
608 | * @sd: information to @actor | |
609 | * @actor: handler that splices the data | |
610 | * | |
611 | * Description: | |
612 | * This function does little more than loop over the pipe and call | |
613 | * @actor to do the actual moving of a single struct pipe_buffer to | |
614 | * the desired destination. See pipe_to_file, pipe_to_sendpage, or | |
615 | * pipe_to_user. | |
616 | * | |
83f9135b | 617 | */ |
c66ab6fa JA |
618 | ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, |
619 | splice_actor *actor) | |
5274f052 | 620 | { |
5274f052 | 621 | int ret, do_wakeup, err; |
5274f052 JA |
622 | |
623 | ret = 0; | |
624 | do_wakeup = 0; | |
625 | ||
5274f052 | 626 | for (;;) { |
6f767b04 JA |
627 | if (pipe->nrbufs) { |
628 | struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; | |
d4c3cca9 | 629 | const struct pipe_buf_operations *ops = buf->ops; |
5274f052 | 630 | |
c66ab6fa JA |
631 | sd->len = buf->len; |
632 | if (sd->len > sd->total_len) | |
633 | sd->len = sd->total_len; | |
5274f052 | 634 | |
c66ab6fa | 635 | err = actor(pipe, buf, sd); |
016b661e | 636 | if (err <= 0) { |
5274f052 JA |
637 | if (!ret && err != -ENODATA) |
638 | ret = err; | |
639 | ||
640 | break; | |
641 | } | |
642 | ||
016b661e JA |
643 | ret += err; |
644 | buf->offset += err; | |
645 | buf->len -= err; | |
646 | ||
c66ab6fa JA |
647 | sd->len -= err; |
648 | sd->pos += err; | |
649 | sd->total_len -= err; | |
650 | if (sd->len) | |
016b661e | 651 | continue; |
73d62d83 | 652 | |
5274f052 JA |
653 | if (!buf->len) { |
654 | buf->ops = NULL; | |
3a326a2c | 655 | ops->release(pipe, buf); |
6f767b04 JA |
656 | pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1); |
657 | pipe->nrbufs--; | |
658 | if (pipe->inode) | |
659 | do_wakeup = 1; | |
5274f052 JA |
660 | } |
661 | ||
c66ab6fa | 662 | if (!sd->total_len) |
5274f052 JA |
663 | break; |
664 | } | |
665 | ||
6f767b04 | 666 | if (pipe->nrbufs) |
5274f052 | 667 | continue; |
3a326a2c | 668 | if (!pipe->writers) |
5274f052 | 669 | break; |
3a326a2c | 670 | if (!pipe->waiting_writers) { |
5274f052 JA |
671 | if (ret) |
672 | break; | |
673 | } | |
674 | ||
c66ab6fa | 675 | if (sd->flags & SPLICE_F_NONBLOCK) { |
29e35094 LT |
676 | if (!ret) |
677 | ret = -EAGAIN; | |
678 | break; | |
679 | } | |
680 | ||
5274f052 JA |
681 | if (signal_pending(current)) { |
682 | if (!ret) | |
683 | ret = -ERESTARTSYS; | |
684 | break; | |
685 | } | |
686 | ||
687 | if (do_wakeup) { | |
c0bd1f65 | 688 | smp_mb(); |
3a326a2c IM |
689 | if (waitqueue_active(&pipe->wait)) |
690 | wake_up_interruptible_sync(&pipe->wait); | |
691 | kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); | |
5274f052 JA |
692 | do_wakeup = 0; |
693 | } | |
694 | ||
3a326a2c | 695 | pipe_wait(pipe); |
5274f052 JA |
696 | } |
697 | ||
5274f052 | 698 | if (do_wakeup) { |
c0bd1f65 | 699 | smp_mb(); |
3a326a2c IM |
700 | if (waitqueue_active(&pipe->wait)) |
701 | wake_up_interruptible(&pipe->wait); | |
702 | kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); | |
5274f052 JA |
703 | } |
704 | ||
5274f052 | 705 | return ret; |
5274f052 | 706 | } |
40bee44e | 707 | EXPORT_SYMBOL(__splice_from_pipe); |
5274f052 | 708 | |
932cc6d4 JA |
709 | /** |
710 | * splice_from_pipe - splice data from a pipe to a file | |
711 | * @pipe: pipe to splice from | |
712 | * @out: file to splice to | |
713 | * @ppos: position in @out | |
714 | * @len: how many bytes to splice | |
715 | * @flags: splice modifier flags | |
716 | * @actor: handler that splices the data | |
717 | * | |
718 | * Description: | |
719 | * See __splice_from_pipe. This function locks the input and output inodes, | |
720 | * otherwise it's identical to __splice_from_pipe(). | |
721 | * | |
722 | */ | |
6da61809 MF |
723 | ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, |
724 | loff_t *ppos, size_t len, unsigned int flags, | |
725 | splice_actor *actor) | |
726 | { | |
727 | ssize_t ret; | |
728 | struct inode *inode = out->f_mapping->host; | |
c66ab6fa JA |
729 | struct splice_desc sd = { |
730 | .total_len = len, | |
731 | .flags = flags, | |
732 | .pos = *ppos, | |
6a14b90b | 733 | .u.file = out, |
c66ab6fa | 734 | }; |
6da61809 MF |
735 | |
736 | /* | |
737 | * The actor worker might be calling ->prepare_write and | |
738 | * ->commit_write. Most of the time, these expect i_mutex to | |
739 | * be held. Since this may result in an ABBA deadlock with | |
740 | * pipe->inode, we have to order lock acquisition here. |
741 | */ | |
742 | inode_double_lock(inode, pipe->inode); | |
c66ab6fa | 743 | ret = __splice_from_pipe(pipe, &sd, actor); |
6da61809 MF |
744 | inode_double_unlock(inode, pipe->inode); |
745 | ||
746 | return ret; | |
747 | } | |
748 | ||
749 | /** | |
750 | * generic_file_splice_write_nolock - generic_file_splice_write without mutexes | |
751 | * @pipe: pipe info | |
752 | * @out: file to write to | |
932cc6d4 | 753 | * @ppos: position in @out |
6da61809 MF |
754 | * @len: number of bytes to splice |
755 | * @flags: splice modifier flags | |
756 | * | |
932cc6d4 JA |
757 | * Description: |
758 | * Will either move or copy pages (determined by @flags options) from | |
759 | * the given pipe inode to the given file. The caller is responsible | |
760 | * for acquiring i_mutex on both inodes. | |
6da61809 MF |
761 | * |
762 | */ | |
763 | ssize_t | |
764 | generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out, | |
765 | loff_t *ppos, size_t len, unsigned int flags) | |
766 | { | |
767 | struct address_space *mapping = out->f_mapping; | |
768 | struct inode *inode = mapping->host; | |
c66ab6fa JA |
769 | struct splice_desc sd = { |
770 | .total_len = len, | |
771 | .flags = flags, | |
772 | .pos = *ppos, | |
6a14b90b | 773 | .u.file = out, |
c66ab6fa | 774 | }; |
6da61809 MF |
775 | ssize_t ret; |
776 | int err; | |
777 | ||
0f7fc9e4 | 778 | err = remove_suid(out->f_path.dentry); |
8c34e2d6 JA |
779 | if (unlikely(err)) |
780 | return err; | |
781 | ||
c66ab6fa | 782 | ret = __splice_from_pipe(pipe, &sd, pipe_to_file); |
6da61809 | 783 | if (ret > 0) { |
17ee4f49 JA |
784 | unsigned long nr_pages; |
785 | ||
6da61809 | 786 | *ppos += ret; |
17ee4f49 | 787 | nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
6da61809 MF |
788 | |
789 | /* | |
790 | * If file or inode is SYNC and we actually wrote some data, | |
791 | * sync it. | |
792 | */ | |
793 | if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
794 | err = generic_osync_inode(inode, mapping, | |
795 | OSYNC_METADATA|OSYNC_DATA); | |
796 | ||
797 | if (err) | |
798 | ret = err; | |
799 | } | |
17ee4f49 | 800 | balance_dirty_pages_ratelimited_nr(mapping, nr_pages); |
6da61809 MF |
801 | } |
802 | ||
803 | return ret; | |
804 | } | |
805 | ||
806 | EXPORT_SYMBOL(generic_file_splice_write_nolock); | |
807 | ||
83f9135b JA |
808 | /** |
809 | * generic_file_splice_write - splice data from a pipe to a file | |
3a326a2c | 810 | * @pipe: pipe info |
83f9135b | 811 | * @out: file to write to |
932cc6d4 | 812 | * @ppos: position in @out |
83f9135b JA |
813 | * @len: number of bytes to splice |
814 | * @flags: splice modifier flags | |
815 | * | |
932cc6d4 JA |
816 | * Description: |
817 | * Will either move or copy pages (determined by @flags options) from | |
818 | * the given pipe inode to the given file. | |
83f9135b JA |
819 | * |
820 | */ | |
3a326a2c IM |
821 | ssize_t |
822 | generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, | |
cbb7e577 | 823 | loff_t *ppos, size_t len, unsigned int flags) |
5274f052 | 824 | { |
4f6f0bd2 | 825 | struct address_space *mapping = out->f_mapping; |
8c34e2d6 | 826 | struct inode *inode = mapping->host; |
b5376771 | 827 | int killsuid, killpriv; |
3a326a2c | 828 | ssize_t ret; |
b5376771 | 829 | int err = 0; |
8c34e2d6 | 830 | |
b5376771 SH |
831 | killpriv = security_inode_need_killpriv(out->f_path.dentry); |
832 | killsuid = should_remove_suid(out->f_path.dentry); | |
833 | if (unlikely(killsuid || killpriv)) { | |
8c34e2d6 | 834 | mutex_lock(&inode->i_mutex); |
b5376771 SH |
835 | if (killpriv) |
836 | err = security_inode_killpriv(out->f_path.dentry); | |
837 | if (!err && killsuid) | |
838 | err = __remove_suid(out->f_path.dentry, killsuid); | |
8c34e2d6 JA |
839 | mutex_unlock(&inode->i_mutex); |
840 | if (err) | |
841 | return err; | |
842 | } | |
3a326a2c | 843 | |
00522fb4 | 844 | ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); |
a4514ebd | 845 | if (ret > 0) { |
17ee4f49 JA |
846 | unsigned long nr_pages; |
847 | ||
a4514ebd | 848 | *ppos += ret; |
17ee4f49 | 849 | nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
a4514ebd JA |
850 | |
851 | /* | |
852 | * If file or inode is SYNC and we actually wrote some data, | |
853 | * sync it. | |
854 | */ | |
855 | if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
a4514ebd JA |
856 | mutex_lock(&inode->i_mutex); |
857 | err = generic_osync_inode(inode, mapping, | |
858 | OSYNC_METADATA|OSYNC_DATA); | |
859 | mutex_unlock(&inode->i_mutex); | |
4f6f0bd2 | 860 | |
a4514ebd JA |
861 | if (err) |
862 | ret = err; | |
863 | } | |
17ee4f49 | 864 | balance_dirty_pages_ratelimited_nr(mapping, nr_pages); |
4f6f0bd2 JA |
865 | } |
866 | ||
867 | return ret; | |
5274f052 JA |
868 | } |
869 | ||
059a8f37 JA |
870 | EXPORT_SYMBOL(generic_file_splice_write); |
871 | ||
83f9135b JA |
872 | /** |
873 | * generic_splice_sendpage - splice data from a pipe to a socket | |
932cc6d4 | 874 | * @pipe: pipe to splice from |
83f9135b | 875 | * @out: socket to write to |
932cc6d4 | 876 | * @ppos: position in @out |
83f9135b JA |
877 | * @len: number of bytes to splice |
878 | * @flags: splice modifier flags | |
879 | * | |
932cc6d4 JA |
880 | * Description: |
881 | * Will send @len bytes from the pipe to a network socket. No data copying | |
882 | * is involved. | |
83f9135b JA |
883 | * |
884 | */ | |
3a326a2c | 885 | ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, |
cbb7e577 | 886 | loff_t *ppos, size_t len, unsigned int flags) |
5274f052 | 887 | { |
00522fb4 | 888 | return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); |
5274f052 JA |
889 | } |
890 | ||
059a8f37 | 891 | EXPORT_SYMBOL(generic_splice_sendpage); |
a0f06780 | 892 | |
83f9135b JA |
893 | /* |
894 | * Attempt to initiate a splice from pipe to file. | |
895 | */ | |
3a326a2c | 896 | static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, |
cbb7e577 | 897 | loff_t *ppos, size_t len, unsigned int flags) |
5274f052 | 898 | { |
5274f052 JA |
899 | int ret; |
900 | ||
49570e9b | 901 | if (unlikely(!out->f_op || !out->f_op->splice_write)) |
5274f052 JA |
902 | return -EINVAL; |
903 | ||
49570e9b | 904 | if (unlikely(!(out->f_mode & FMODE_WRITE))) |
5274f052 JA |
905 | return -EBADF; |
906 | ||
cbb7e577 | 907 | ret = rw_verify_area(WRITE, out, ppos, len); |
5274f052 JA |
908 | if (unlikely(ret < 0)) |
909 | return ret; | |
910 | ||
29ce2058 JM |
911 | ret = security_file_permission(out, MAY_WRITE); |
912 | if (unlikely(ret < 0)) | |
913 | return ret; | |
914 | ||
cbb7e577 | 915 | return out->f_op->splice_write(pipe, out, ppos, len, flags); |
5274f052 JA |
916 | } |
917 | ||
83f9135b JA |
918 | /* |
919 | * Attempt to initiate a splice from a file to a pipe. | |
920 | */ | |
cbb7e577 JA |
921 | static long do_splice_to(struct file *in, loff_t *ppos, |
922 | struct pipe_inode_info *pipe, size_t len, | |
923 | unsigned int flags) | |
5274f052 | 924 | { |
5274f052 JA |
925 | int ret; |
926 | ||
49570e9b | 927 | if (unlikely(!in->f_op || !in->f_op->splice_read)) |
5274f052 JA |
928 | return -EINVAL; |
929 | ||
49570e9b | 930 | if (unlikely(!(in->f_mode & FMODE_READ))) |
5274f052 JA |
931 | return -EBADF; |
932 | ||
cbb7e577 | 933 | ret = rw_verify_area(READ, in, ppos, len); |
5274f052 JA |
934 | if (unlikely(ret < 0)) |
935 | return ret; | |
936 | ||
29ce2058 JM |
937 | ret = security_file_permission(in, MAY_READ); |
938 | if (unlikely(ret < 0)) | |
939 | return ret; | |
940 | ||
cbb7e577 | 941 | return in->f_op->splice_read(in, ppos, pipe, len, flags); |
5274f052 JA |
942 | } |
943 | ||
932cc6d4 JA |
944 | /** |
945 | * splice_direct_to_actor - splices data directly between two non-pipes | |
946 | * @in: file to splice from | |
947 | * @sd: actor information on where to splice to | |
948 | * @actor: handles the data splicing | |
949 | * | |
950 | * Description: | |
951 | * This is a special case helper to splice directly between two | |
952 | * points, without requiring an explicit pipe. Internally an allocated | |
79685b8d | 953 | * pipe is cached in the process, and reused during the lifetime of |
932cc6d4 JA |
954 | * that process. |
955 | * | |
c66ab6fa JA |
956 | */ |
957 | ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |
958 | splice_direct_actor *actor) | |
b92ce558 JA |
959 | { |
960 | struct pipe_inode_info *pipe; | |
961 | long ret, bytes; | |
962 | umode_t i_mode; | |
c66ab6fa JA |
963 | size_t len; |
964 | int i, flags; | |
b92ce558 JA |
965 | |
966 | /* | |
967 | * We require the input being a regular file, as we don't want to | |
968 | * randomly drop data for eg socket -> socket splicing. Use the | |
969 | * piped splicing for that! | |
970 | */ | |
0f7fc9e4 | 971 | i_mode = in->f_path.dentry->d_inode->i_mode; |
b92ce558 JA |
972 | if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) |
973 | return -EINVAL; | |
974 | ||
975 | /* | |
976 | * neither in nor out is a pipe, set up an internal pipe attached to |
977 | * 'out' and transfer the wanted data from 'in' to 'out' through that | |
978 | */ | |
979 | pipe = current->splice_pipe; | |
49570e9b | 980 | if (unlikely(!pipe)) { |
b92ce558 JA |
981 | pipe = alloc_pipe_info(NULL); |
982 | if (!pipe) | |
983 | return -ENOMEM; | |
984 | ||
985 | /* | |
986 | * We don't have an immediate reader, but we'll read the stuff | |
00522fb4 | 987 | * out of the pipe right after the splice_to_pipe(). So set |
b92ce558 JA |
988 | * PIPE_READERS appropriately. |
989 | */ | |
990 | pipe->readers = 1; | |
991 | ||
992 | current->splice_pipe = pipe; | |
993 | } | |
994 | ||
995 | /* | |
73d62d83 | 996 | * Do the splice. |
b92ce558 JA |
997 | */ |
998 | ret = 0; | |
999 | bytes = 0; | |
c66ab6fa JA |
1000 | len = sd->total_len; |
1001 | flags = sd->flags; | |
1002 | ||
1003 | /* | |
1004 | * Don't block on output, we have to drain the direct pipe. | |
1005 | */ | |
1006 | sd->flags &= ~SPLICE_F_NONBLOCK; | |
b92ce558 JA |
1007 | |
1008 | while (len) { | |
51a92c0f | 1009 | size_t read_len; |
bcd4f3ac | 1010 | loff_t pos = sd->pos; |
b92ce558 | 1011 | |
bcd4f3ac | 1012 | ret = do_splice_to(in, &pos, pipe, len, flags); |
51a92c0f | 1013 | if (unlikely(ret <= 0)) |
b92ce558 JA |
1014 | goto out_release; |
1015 | ||
1016 | read_len = ret; | |
c66ab6fa | 1017 | sd->total_len = read_len; |
b92ce558 JA |
1018 | |
1019 | /* | |
1020 | * NOTE: nonblocking mode only applies to the input. We | |
1021 | * must not do the output in nonblocking mode as then we | |
1022 | * could get stuck data in the internal pipe: | |
1023 | */ | |
c66ab6fa | 1024 | ret = actor(pipe, sd); |
51a92c0f | 1025 | if (unlikely(ret <= 0)) |
b92ce558 JA |
1026 | goto out_release; |
1027 | ||
1028 | bytes += ret; | |
1029 | len -= ret; | |
bcd4f3ac | 1030 | sd->pos = pos; |
b92ce558 | 1031 | |
51a92c0f JA |
1032 | if (ret < read_len) |
1033 | goto out_release; | |
b92ce558 JA |
1034 | } |
1035 | ||
1036 | pipe->nrbufs = pipe->curbuf = 0; | |
b92ce558 JA |
1037 | return bytes; |
1038 | ||
1039 | out_release: | |
1040 | /* | |
1041 | * If we did an incomplete transfer we must release | |
1042 | * the pipe buffers in question: | |
1043 | */ | |
1044 | for (i = 0; i < PIPE_BUFFERS; i++) { | |
1045 | struct pipe_buffer *buf = pipe->bufs + i; | |
1046 | ||
1047 | if (buf->ops) { | |
1048 | buf->ops->release(pipe, buf); | |
1049 | buf->ops = NULL; | |
1050 | } | |
1051 | } | |
1052 | pipe->nrbufs = pipe->curbuf = 0; | |
1053 | ||
1054 | /* | |
1055 | * If we transferred some data, return the number of bytes: | |
1056 | */ | |
1057 | if (bytes > 0) | |
1058 | return bytes; | |
1059 | ||
1060 | return ret; | |
c66ab6fa JA |
1061 | |
1062 | } | |
1063 | EXPORT_SYMBOL(splice_direct_to_actor); | |
1064 | ||
1065 | static int direct_splice_actor(struct pipe_inode_info *pipe, | |
1066 | struct splice_desc *sd) | |
1067 | { | |
6a14b90b | 1068 | struct file *file = sd->u.file; |
c66ab6fa JA |
1069 | |
1070 | return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags); | |
1071 | } | |
1072 | ||
932cc6d4 JA |
1073 | /** |
1074 | * do_splice_direct - splices data directly between two files | |
1075 | * @in: file to splice from | |
1076 | * @ppos: input file offset | |
1077 | * @out: file to splice to | |
1078 | * @len: number of bytes to splice | |
1079 | * @flags: splice modifier flags | |
1080 | * | |
1081 | * Description: | |
1082 | * For use by do_sendfile(). splice can easily emulate sendfile, but | |
1083 | * doing it in the application would incur an extra system call | |
1084 | * (splice in + splice out, as compared to just sendfile()). So this helper | |
1085 | * can splice directly through a process-private pipe. | |
1086 | * | |
1087 | */ | |
c66ab6fa JA |
1088 | long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, |
1089 | size_t len, unsigned int flags) | |
1090 | { | |
1091 | struct splice_desc sd = { | |
1092 | .len = len, | |
1093 | .total_len = len, | |
1094 | .flags = flags, | |
1095 | .pos = *ppos, | |
6a14b90b | 1096 | .u.file = out, |
c66ab6fa | 1097 | }; |
51a92c0f | 1098 | long ret; |
c66ab6fa JA |
1099 | |
1100 | ret = splice_direct_to_actor(in, &sd, direct_splice_actor); | |
51a92c0f JA |
1101 | if (ret > 0) |
1102 | *ppos += ret; | |
1103 | ||
c66ab6fa | 1104 | return ret; |
b92ce558 JA |
1105 | } |
1106 | ||
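
As the doc comment above notes, this helper backs do_sendfile(); from userspace the equivalent is a single sendfile(2) call instead of an explicit splice-in/splice-out pair through a pipe (as in the sketch near the top of this file). A minimal hedged sketch, assuming in_fd/out_fd are already-open descriptors:

```c
#include <sys/types.h>
#include <sys/sendfile.h>

/* One syscall; the kernel splices through its cached, process-private pipe. */
long copy_range(int out_fd, int in_fd, off_t *off, size_t len)
{
	return sendfile(out_fd, in_fd, off, len);
}
```
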
ddac0d39 JA |
1107 | /* |
1108 | * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same | |
1109 | * location, so checking ->i_pipe is not enough to verify that this is a | |
1110 | * pipe. | |
1111 | */ | |
1112 | static inline struct pipe_inode_info *pipe_info(struct inode *inode) | |
1113 | { | |
1114 | if (S_ISFIFO(inode->i_mode)) | |
1115 | return inode->i_pipe; | |
1116 | ||
1117 | return NULL; | |
1118 | } | |
1119 | ||
83f9135b JA |
1120 | /* |
1121 | * Determine where to splice to/from. | |
1122 | */ | |
529565dc IM |
1123 | static long do_splice(struct file *in, loff_t __user *off_in, |
1124 | struct file *out, loff_t __user *off_out, | |
1125 | size_t len, unsigned int flags) | |
5274f052 | 1126 | { |
3a326a2c | 1127 | struct pipe_inode_info *pipe; |
cbb7e577 | 1128 | loff_t offset, *off; |
a4514ebd | 1129 | long ret; |
5274f052 | 1130 | |
0f7fc9e4 | 1131 | pipe = pipe_info(in->f_path.dentry->d_inode); |
529565dc IM |
1132 | if (pipe) { |
1133 | if (off_in) | |
1134 | return -ESPIPE; | |
b92ce558 JA |
1135 | if (off_out) { |
1136 | if (out->f_op->llseek == no_llseek) | |
1137 | return -EINVAL; | |
cbb7e577 | 1138 | if (copy_from_user(&offset, off_out, sizeof(loff_t))) |
b92ce558 | 1139 | return -EFAULT; |
cbb7e577 JA |
1140 | off = &offset; |
1141 | } else | |
1142 | off = &out->f_pos; | |
529565dc | 1143 | |
a4514ebd JA |
1144 | ret = do_splice_from(pipe, out, off, len, flags); |
1145 | ||
1146 | if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) | |
1147 | ret = -EFAULT; | |
1148 | ||
1149 | return ret; | |
529565dc | 1150 | } |
5274f052 | 1151 | |
0f7fc9e4 | 1152 | pipe = pipe_info(out->f_path.dentry->d_inode); |
529565dc IM |
1153 | if (pipe) { |
1154 | if (off_out) | |
1155 | return -ESPIPE; | |
b92ce558 JA |
1156 | if (off_in) { |
1157 | if (in->f_op->llseek == no_llseek) | |
1158 | return -EINVAL; | |
cbb7e577 | 1159 | if (copy_from_user(&offset, off_in, sizeof(loff_t))) |
b92ce558 | 1160 | return -EFAULT; |
cbb7e577 JA |
1161 | off = &offset; |
1162 | } else | |
1163 | off = &in->f_pos; | |
529565dc | 1164 | |
a4514ebd JA |
1165 | ret = do_splice_to(in, off, pipe, len, flags); |
1166 | ||
1167 | if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) | |
1168 | ret = -EFAULT; | |
1169 | ||
1170 | return ret; | |
529565dc | 1171 | } |
5274f052 JA |
1172 | |
1173 | return -EINVAL; | |
1174 | } | |
1175 | ||
75723957 LT |
1176 | /* |
1177 | * Do a copy-from-user while holding the mmap_semaphore for reading, in a | |
1178 | * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem | |
1179 | * for writing) and page faulting on the user memory pointed to by src. | |
1180 | * This assumes that we will very rarely hit the partial != 0 path, or this | |
1181 | * will not be a win. | |
1182 | */ | |
1183 | static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n) | |
1184 | { | |
1185 | int partial; | |
1186 | ||
1187 | pagefault_disable(); | |
1188 | partial = __copy_from_user_inatomic(dst, src, n); | |
1189 | pagefault_enable(); | |
1190 | ||
1191 | /* | |
1192 | * Didn't copy everything, drop the mmap_sem and do a faulting copy | |
1193 | */ | |
1194 | if (unlikely(partial)) { | |
1195 | up_read(¤t->mm->mmap_sem); | |
1196 | partial = copy_from_user(dst, src, n); | |
1197 | down_read(¤t->mm->mmap_sem); | |
1198 | } | |
1199 | ||
1200 | return partial; | |
1201 | } | |
1202 | ||
912d35f8 JA |
1203 | /* |
1204 | * Map an iov into an array of pages and offset/length tuples. With the |
1205 | * partial_page structure, we can map several non-contiguous ranges into |
1206 | * our one pages[] map instead of splitting that operation into pieces. |
1207 | * Could easily be exported as a generic helper for other users, in which | |
1208 | * case one would probably want to add a 'max_nr_pages' parameter as well. | |
1209 | */ | |
1210 | static int get_iovec_page_array(const struct iovec __user *iov, | |
1211 | unsigned int nr_vecs, struct page **pages, | |
7afa6fd0 | 1212 | struct partial_page *partial, int aligned) |
912d35f8 JA |
1213 | { |
1214 | int buffers = 0, error = 0; | |
1215 | ||
912d35f8 JA |
1216 | down_read(¤t->mm->mmap_sem); |
1217 | ||
1218 | while (nr_vecs) { | |
1219 | unsigned long off, npages; | |
75723957 | 1220 | struct iovec entry; |
912d35f8 JA |
1221 | void __user *base; |
1222 | size_t len; | |
1223 | int i; | |
1224 | ||
75723957 LT |
1225 | error = -EFAULT; |
1226 | if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry))) | |
912d35f8 JA |
1227 | break; |
1228 | ||
75723957 LT |
1229 | base = entry.iov_base; |
1230 | len = entry.iov_len; | |
1231 | ||
912d35f8 JA |
1232 | /* |
1233 | * Sanity check this iovec. 0 read succeeds. | |
1234 | */ | |
75723957 | 1235 | error = 0; |
912d35f8 JA |
1236 | if (unlikely(!len)) |
1237 | break; | |
1238 | error = -EFAULT; | |
1239 | if (unlikely(!base)) | |
1240 | break; | |
1241 | ||
1242 | /* | |
1243 | * Get this base offset and number of pages, then map | |
1244 | * in the user pages. | |
1245 | */ | |
1246 | off = (unsigned long) base & ~PAGE_MASK; | |
7afa6fd0 JA |
1247 | |
1248 | /* | |
1249 | * If asked for alignment, the offset must be zero and the | |
1250 | * length a multiple of the PAGE_SIZE. | |
1251 | */ | |
1252 | error = -EINVAL; | |
1253 | if (aligned && (off || len & ~PAGE_MASK)) | |
1254 | break; | |
1255 | ||
912d35f8 JA |
1256 | npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1257 | if (npages > PIPE_BUFFERS - buffers) | |
1258 | npages = PIPE_BUFFERS - buffers; | |
1259 | ||
1260 | error = get_user_pages(current, current->mm, | |
1261 | (unsigned long) base, npages, 0, 0, | |
1262 | &pages[buffers], NULL); | |
1263 | ||
1264 | if (unlikely(error <= 0)) | |
1265 | break; | |
1266 | ||
1267 | /* | |
1268 | * Fill this contiguous range into the partial page map. | |
1269 | */ | |
1270 | for (i = 0; i < error; i++) { | |
7591489a | 1271 | const int plen = min_t(size_t, len, PAGE_SIZE - off); |
912d35f8 JA |
1272 | |
1273 | partial[buffers].offset = off; | |
1274 | partial[buffers].len = plen; | |
1275 | ||
1276 | off = 0; | |
1277 | len -= plen; | |
1278 | buffers++; | |
1279 | } | |
1280 | ||
1281 | /* | |
1282 | * We didn't complete this iov, stop here since it probably | |
1283 | * means we have to move some of this into a pipe to | |
1284 | * be able to continue. | |
1285 | */ | |
1286 | if (len) | |
1287 | break; | |
1288 | ||
1289 | /* | |
1290 | * Don't continue if we mapped fewer pages than we asked for, | |
1291 | * or if we mapped the max number of pages that we have | |
1292 | * room for. | |
1293 | */ | |
1294 | if (error < npages || buffers == PIPE_BUFFERS) | |
1295 | break; | |
1296 | ||
1297 | nr_vecs--; | |
1298 | iov++; | |
1299 | } | |
1300 | ||
1301 | up_read(¤t->mm->mmap_sem); | |
1302 | ||
1303 | if (buffers) | |
1304 | return buffers; | |
1305 | ||
1306 | return error; | |
1307 | } | |
1308 | ||
6a14b90b JA |
1309 | static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, |
1310 | struct splice_desc *sd) | |
1311 | { | |
1312 | char *src; | |
1313 | int ret; | |
1314 | ||
cac36bb0 | 1315 | ret = buf->ops->confirm(pipe, buf); |
6a14b90b JA |
1316 | if (unlikely(ret)) |
1317 | return ret; | |
1318 | ||
1319 | /* | |
1320 | * See if we can use the atomic maps, by prefaulting in the | |
1321 | * pages and doing an atomic copy | |
1322 | */ | |
1323 | if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) { | |
1324 | src = buf->ops->map(pipe, buf, 1); | |
1325 | ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset, | |
1326 | sd->len); | |
1327 | buf->ops->unmap(pipe, buf, src); | |
1328 | if (!ret) { | |
1329 | ret = sd->len; | |
1330 | goto out; | |
1331 | } | |
1332 | } | |
1333 | ||
1334 | /* | |
1335 | * No dice, use slow non-atomic map and copy | |
1336 | */ | |
1337 | src = buf->ops->map(pipe, buf, 0); | |
1338 | ||
1339 | ret = sd->len; | |
1340 | if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len)) | |
1341 | ret = -EFAULT; | |
1342 | ||
6866bef4 | 1343 | buf->ops->unmap(pipe, buf, src); |
6a14b90b JA |
1344 | out: |
1345 | if (ret > 0) | |
1346 | sd->u.userptr += ret; | |
6a14b90b JA |
1347 | return ret; |
1348 | } | |
1349 | ||
1350 | /* | |
1351 | * For lack of a better implementation, implement vmsplice() to userspace | |
1352 | * as a simple copy of the pipe's pages to the user iov. |
1353 | */ | |
1354 | static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, | |
1355 | unsigned long nr_segs, unsigned int flags) | |
1356 | { | |
1357 | struct pipe_inode_info *pipe; | |
1358 | struct splice_desc sd; | |
1359 | ssize_t size; | |
1360 | int error; | |
1361 | long ret; | |
1362 | ||
1363 | pipe = pipe_info(file->f_path.dentry->d_inode); | |
1364 | if (!pipe) | |
1365 | return -EBADF; | |
1366 | ||
1367 | if (pipe->inode) | |
1368 | mutex_lock(&pipe->inode->i_mutex); | |
1369 | ||
1370 | error = ret = 0; | |
1371 | while (nr_segs) { | |
1372 | void __user *base; | |
1373 | size_t len; | |
1374 | ||
1375 | /* | |
1376 | * Get user address base and length for this iovec. | |
1377 | */ | |
1378 | error = get_user(base, &iov->iov_base); | |
1379 | if (unlikely(error)) | |
1380 | break; | |
1381 | error = get_user(len, &iov->iov_len); | |
1382 | if (unlikely(error)) | |
1383 | break; | |
1384 | ||
1385 | /* | |
1386 | * Sanity check this iovec. 0 read succeeds. | |
1387 | */ | |
1388 | if (unlikely(!len)) | |
1389 | break; | |
1390 | if (unlikely(!base)) { | |
1391 | error = -EFAULT; | |
1392 | break; | |
1393 | } | |
1394 | ||
1395 | sd.len = 0; | |
1396 | sd.total_len = len; | |
1397 | sd.flags = flags; | |
1398 | sd.u.userptr = base; | |
1399 | sd.pos = 0; | |
1400 | ||
1401 | size = __splice_from_pipe(pipe, &sd, pipe_to_user); | |
1402 | if (size < 0) { | |
1403 | if (!ret) | |
1404 | ret = size; | |
1405 | ||
1406 | break; | |
1407 | } | |
1408 | ||
1409 | ret += size; | |
1410 | ||
1411 | if (size < len) | |
1412 | break; | |
1413 | ||
1414 | nr_segs--; | |
1415 | iov++; | |
1416 | } | |
1417 | ||
1418 | if (pipe->inode) | |
1419 | mutex_unlock(&pipe->inode->i_mutex); | |
1420 | ||
1421 | if (!ret) | |
1422 | ret = error; | |
1423 | ||
1424 | return ret; | |
1425 | } | |
1426 | ||
912d35f8 JA |
1427 | /* |
1428 | * vmsplice splices a user address range into a pipe. It can be thought of | |
1429 | * as splice-from-memory, where the regular splice is splice-from-file (or | |
1430 | * to file). In both cases the output is a pipe, naturally. | |
912d35f8 | 1431 | */ |
6a14b90b JA |
1432 | static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, |
1433 | unsigned long nr_segs, unsigned int flags) | |
912d35f8 | 1434 | { |
ddac0d39 | 1435 | struct pipe_inode_info *pipe; |
912d35f8 JA |
1436 | struct page *pages[PIPE_BUFFERS]; |
1437 | struct partial_page partial[PIPE_BUFFERS]; | |
1438 | struct splice_pipe_desc spd = { | |
1439 | .pages = pages, | |
1440 | .partial = partial, | |
1441 | .flags = flags, | |
1442 | .ops = &user_page_pipe_buf_ops, | |
1443 | }; | |
1444 | ||
0f7fc9e4 | 1445 | pipe = pipe_info(file->f_path.dentry->d_inode); |
ddac0d39 | 1446 | if (!pipe) |
912d35f8 | 1447 | return -EBADF; |
912d35f8 | 1448 | |
7afa6fd0 JA |
1449 | spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial, |
1450 | flags & SPLICE_F_GIFT); | |
912d35f8 JA |
1451 | if (spd.nr_pages <= 0) |
1452 | return spd.nr_pages; | |
1453 | ||
00522fb4 | 1454 | return splice_to_pipe(pipe, &spd); |
912d35f8 JA |
1455 | } |
1456 | ||
6a14b90b JA |
1457 | /* |
1458 | * Note that vmsplice only really supports true splicing _from_ user memory | |
1459 | * to a pipe, not the other way around. Splicing from user memory is a simple | |
1460 | * operation that can be supported without any funky alignment restrictions | |
1461 | * or nasty vm tricks. We simply map in the user memory and fill them into | |
1462 | * a pipe. The reverse isn't quite as easy, though. There are two possible | |
1463 | * solutions for that: | |
1464 | * | |
1465 | * - memcpy() the data internally, at which point we might as well just | |
1466 | * do a regular read() on the buffer anyway. | |
1467 | * - Lots of nasty vm tricks, that are neither fast nor flexible (it | |
1468 | * has restrictions on both ends of the pipe). |
1469 | * | |
1470 | * Currently we punt and implement it as a normal copy, see pipe_to_user(). | |
1471 | * | |
1472 | */ | |
912d35f8 JA |
1473 | asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, |
1474 | unsigned long nr_segs, unsigned int flags) | |
1475 | { | |
1476 | struct file *file; | |
1477 | long error; | |
1478 | int fput; | |
1479 | ||
6a14b90b JA |
1480 | if (unlikely(nr_segs > UIO_MAXIOV)) |
1481 | return -EINVAL; | |
1482 | else if (unlikely(!nr_segs)) | |
1483 | return 0; | |
1484 | ||
912d35f8 JA |
1485 | error = -EBADF; |
1486 | file = fget_light(fd, &fput); | |
1487 | if (file) { | |
1488 | if (file->f_mode & FMODE_WRITE) | |
6a14b90b JA |
1489 | error = vmsplice_to_pipe(file, iov, nr_segs, flags); |
1490 | else if (file->f_mode & FMODE_READ) | |
1491 | error = vmsplice_to_user(file, iov, nr_segs, flags); | |
912d35f8 JA |
1492 | |
1493 | fput_light(file, fput); | |
1494 | } | |
1495 | ||
1496 | return error; | |
1497 | } | |
1498 | ||
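
A hedged userspace sketch of the vmsplice() path implemented above: a page-aligned buffer is handed to the write side of a pipe, optionally gifted so the pages may later be stolen by a splice-to-file (see get_iovec_page_array's aligned check). The helper name and buffer contents are illustrative only.

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Hand a freshly filled, page-aligned buffer of 'len' bytes to pipe_wfd.
 * With SPLICE_F_GIFT the pages may be stolen later, so the buffer must
 * not be reused (it is intentionally leaked in this sketch).
 */
long gift_to_pipe(int pipe_wfd, size_t len)
{
	struct iovec iov;
	void *buf;

	/* Gifting requires a page-aligned base and a page-multiple length. */
	if (posix_memalign(&buf, getpagesize(), len))
		return -1;
	memset(buf, 'x', len);		/* pretend this is useful payload */

	iov.iov_base = buf;
	iov.iov_len = len;

	/* Pages are mapped and linked into the pipe; no copy is made. */
	return vmsplice(pipe_wfd, &iov, 1, SPLICE_F_GIFT);
}
```
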
529565dc IM |
1499 | asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, |
1500 | int fd_out, loff_t __user *off_out, | |
1501 | size_t len, unsigned int flags) | |
5274f052 JA |
1502 | { |
1503 | long error; | |
1504 | struct file *in, *out; | |
1505 | int fput_in, fput_out; | |
1506 | ||
1507 | if (unlikely(!len)) | |
1508 | return 0; | |
1509 | ||
1510 | error = -EBADF; | |
529565dc | 1511 | in = fget_light(fd_in, &fput_in); |
5274f052 JA |
1512 | if (in) { |
1513 | if (in->f_mode & FMODE_READ) { | |
529565dc | 1514 | out = fget_light(fd_out, &fput_out); |
5274f052 JA |
1515 | if (out) { |
1516 | if (out->f_mode & FMODE_WRITE) | |
529565dc IM |
1517 | error = do_splice(in, off_in, |
1518 | out, off_out, | |
1519 | len, flags); | |
5274f052 JA |
1520 | fput_light(out, fput_out); |
1521 | } | |
1522 | } | |
1523 | ||
1524 | fput_light(in, fput_in); | |
1525 | } | |
1526 | ||
1527 | return error; | |
1528 | } | |
70524490 | 1529 | |
aadd06e5 JA |
1530 | /* |
1531 | * Make sure there's data to read. Wait for input if we can, otherwise | |
1532 | * return an appropriate error. | |
1533 | */ | |
1534 | static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) | |
1535 | { | |
1536 | int ret; | |
1537 | ||
1538 | /* | |
1539 | * Check ->nrbufs without the inode lock first. This function | |
1540 | * is speculative anyway, so missing one is ok. |
1541 | */ | |
1542 | if (pipe->nrbufs) | |
1543 | return 0; | |
1544 | ||
1545 | ret = 0; | |
1546 | mutex_lock(&pipe->inode->i_mutex); | |
1547 | ||
1548 | while (!pipe->nrbufs) { | |
1549 | if (signal_pending(current)) { | |
1550 | ret = -ERESTARTSYS; | |
1551 | break; | |
1552 | } | |
1553 | if (!pipe->writers) | |
1554 | break; | |
1555 | if (!pipe->waiting_writers) { | |
1556 | if (flags & SPLICE_F_NONBLOCK) { | |
1557 | ret = -EAGAIN; | |
1558 | break; | |
1559 | } | |
1560 | } | |
1561 | pipe_wait(pipe); | |
1562 | } | |
1563 | ||
1564 | mutex_unlock(&pipe->inode->i_mutex); | |
1565 | return ret; | |
1566 | } | |
1567 | ||
1568 | /* | |
1569 | * Make sure there's writeable room. Wait for room if we can, otherwise | |
1570 | * return an appropriate error. | |
1571 | */ | |
1572 | static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) | |
1573 | { | |
1574 | int ret; | |
1575 | ||
1576 | /* | |
1577 | * Check ->nrbufs without the inode lock first. This function | |
1578 | * is speculative anyway, so missing one is ok. |
1579 | */ | |
1580 | if (pipe->nrbufs < PIPE_BUFFERS) | |
1581 | return 0; | |
1582 | ||
1583 | ret = 0; | |
1584 | mutex_lock(&pipe->inode->i_mutex); | |
1585 | ||
1586 | while (pipe->nrbufs >= PIPE_BUFFERS) { | |
1587 | if (!pipe->readers) { | |
1588 | send_sig(SIGPIPE, current, 0); | |
1589 | ret = -EPIPE; | |
1590 | break; | |
1591 | } | |
1592 | if (flags & SPLICE_F_NONBLOCK) { | |
1593 | ret = -EAGAIN; | |
1594 | break; | |
1595 | } | |
1596 | if (signal_pending(current)) { | |
1597 | ret = -ERESTARTSYS; | |
1598 | break; | |
1599 | } | |
1600 | pipe->waiting_writers++; | |
1601 | pipe_wait(pipe); | |
1602 | pipe->waiting_writers--; | |
1603 | } | |
1604 | ||
1605 | mutex_unlock(&pipe->inode->i_mutex); | |
1606 | return ret; | |
1607 | } | |
1608 | ||
70524490 JA |
1609 | /* |
1610 | * Link contents of ipipe to opipe. | |
1611 | */ | |
1612 | static int link_pipe(struct pipe_inode_info *ipipe, | |
1613 | struct pipe_inode_info *opipe, | |
1614 | size_t len, unsigned int flags) | |
1615 | { | |
1616 | struct pipe_buffer *ibuf, *obuf; | |
aadd06e5 | 1617 | int ret = 0, i = 0, nbuf; |
70524490 JA |
1618 | |
1619 | /* | |
1620 | * Potential ABBA deadlock, work around it by ordering lock | |
1621 | * grabbing by inode address. Otherwise two different processes | |
1622 | * could deadlock (one doing tee from A -> B, the other from B -> A). | |
1623 | */ | |
62752ee1 | 1624 | inode_double_lock(ipipe->inode, opipe->inode); |
70524490 | 1625 | |
aadd06e5 | 1626 | do { |
70524490 JA |
1627 | if (!opipe->readers) { |
1628 | send_sig(SIGPIPE, current, 0); | |
1629 | if (!ret) | |
1630 | ret = -EPIPE; | |
1631 | break; | |
1632 | } | |
70524490 | 1633 | |
aadd06e5 JA |
1634 | /* |
1635 | * If we have iterated all input buffers or run out of |
1636 | * output room, break. | |
1637 | */ | |
1638 | if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) | |
1639 | break; | |
70524490 | 1640 | |
aadd06e5 JA |
1641 | ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1)); |
1642 | nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1); | |
70524490 JA |
1643 | |
1644 | /* | |
aadd06e5 JA |
1645 | * Get a reference to this pipe buffer, |
1646 | * so we can copy the contents over. | |
70524490 | 1647 | */ |
aadd06e5 JA |
1648 | ibuf->ops->get(ipipe, ibuf); |
1649 | ||
1650 | obuf = opipe->bufs + nbuf; | |
1651 | *obuf = *ibuf; | |
1652 | ||
2a27250e | 1653 | /* |
aadd06e5 JA |
1654 | * Don't inherit the gift flag, we need to |
1655 | * prevent multiple steals of this page. | |
2a27250e | 1656 | */ |
aadd06e5 | 1657 | obuf->flags &= ~PIPE_BUF_FLAG_GIFT; |
70524490 | 1658 | |
aadd06e5 JA |
1659 | if (obuf->len > len) |
1660 | obuf->len = len; | |
70524490 | 1661 | |
aadd06e5 JA |
1662 | opipe->nrbufs++; |
1663 | ret += obuf->len; | |
1664 | len -= obuf->len; | |
1665 | i++; | |
1666 | } while (len); | |
70524490 | 1667 | |
62752ee1 | 1668 | inode_double_unlock(ipipe->inode, opipe->inode); |
70524490 | 1669 | |
aadd06e5 JA |
1670 | /* |
1671 | * If we put data in the output pipe, wake up any potential readers. |
1672 | */ | |
1673 | if (ret > 0) { | |
70524490 JA |
1674 | smp_mb(); |
1675 | if (waitqueue_active(&opipe->wait)) | |
1676 | wake_up_interruptible(&opipe->wait); | |
1677 | kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN); | |
1678 | } | |
1679 | ||
1680 | return ret; | |
1681 | } | |
1682 | ||
1683 | /* | |
1684 | * This is a tee(1) implementation that works on pipes. It doesn't copy | |
1685 | * any data, it simply references the 'in' pages on the 'out' pipe. | |
1686 | * The 'flags' used are the SPLICE_F_* variants, currently the only | |
1687 | * applicable one is SPLICE_F_NONBLOCK. | |
1688 | */ | |
1689 | static long do_tee(struct file *in, struct file *out, size_t len, | |
1690 | unsigned int flags) | |
1691 | { | |
0f7fc9e4 JJS |
1692 | struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode); |
1693 | struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode); | |
aadd06e5 | 1694 | int ret = -EINVAL; |
70524490 JA |
1695 | |
1696 | /* | |
aadd06e5 JA |
1697 | * Duplicate the contents of ipipe to opipe without actually |
1698 | * copying the data. | |
70524490 | 1699 | */ |
aadd06e5 JA |
1700 | if (ipipe && opipe && ipipe != opipe) { |
1701 | /* | |
1702 | * Keep going, unless we encounter an error. The ipipe/opipe | |
1703 | * ordering doesn't really matter. | |
1704 | */ | |
1705 | ret = link_ipipe_prep(ipipe, flags); | |
1706 | if (!ret) { | |
1707 | ret = link_opipe_prep(opipe, flags); | |
1708 | if (!ret) { | |
1709 | ret = link_pipe(ipipe, opipe, len, flags); | |
1710 | if (!ret && (flags & SPLICE_F_NONBLOCK)) | |
1711 | ret = -EAGAIN; | |
1712 | } | |
1713 | } | |
1714 | } | |
70524490 | 1715 | |
aadd06e5 | 1716 | return ret; |
70524490 JA |
1717 | } |
1718 | ||
1719 | asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags) | |
1720 | { | |
1721 | struct file *in; | |
1722 | int error, fput_in; | |
1723 | ||
1724 | if (unlikely(!len)) | |
1725 | return 0; | |
1726 | ||
1727 | error = -EBADF; | |
1728 | in = fget_light(fdin, &fput_in); | |
1729 | if (in) { | |
1730 | if (in->f_mode & FMODE_READ) { | |
1731 | int fput_out; | |
1732 | struct file *out = fget_light(fdout, &fput_out); | |
1733 | ||
1734 | if (out) { | |
1735 | if (out->f_mode & FMODE_WRITE) | |
1736 | error = do_tee(in, out, len, flags); | |
1737 | fput_light(out, fput_out); | |
1738 | } | |
1739 | } | |
1740 | fput_light(in, fput_in); | |
1741 | } | |
1742 | ||
1743 | return error; | |
1744 | } |