/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS file operations.
 */

#include "protocol.h"
#include "pvfs2-kernel.h"
#include "pvfs2-bufmap.h"
#include <linux/fs.h>
#include <linux/pagemap.h>

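/*
 * Flag op's I/O as complete under op->lock and wake up the device-file
 * side waiting on op->io_completion_waitq so the op can be returned.
 */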
#define wake_up_daemon_for_return(op)			\
do {							\
	spin_lock(&op->lock);				\
	op->io_completed = 1;				\
	spin_unlock(&op->lock);				\
	wake_up_interruptible(&op->io_completion_waitq);\
} while (0)

/*
 * Copy to client-core's address space from the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iovec can contain either addresses (which may be either
 * kernel-space or user-space addresses) or pointers to struct page's.
 */
static int precopy_buffers(struct pvfs2_bufmap *bufmap,
			   int buffer_index,
			   const struct iovec *vec,
			   unsigned long nr_segs,
			   size_t total_size)
{
	int ret = 0;
	struct iov_iter iter;

	/*
	 * copy data from application/kernel by pulling it out
	 * of the iovec.
	 */
	if (total_size) {
		iov_iter_init(&iter, WRITE, vec, nr_segs, total_size);
		ret = pvfs_bufmap_copy_from_iovec(bufmap,
						  &iter,
						  buffer_index,
						  total_size);
		if (ret < 0)
			gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
				   __func__,
				   (long)ret);
	}

	return ret;
}

/*
 * Copy from client-core's address space to the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iovec can contain either addresses (which may be either
 * kernel-space or user-space addresses) or pointers to struct page's.
 */
static int postcopy_buffers(struct pvfs2_bufmap *bufmap,
			    int buffer_index,
			    const struct iovec *vec,
			    int nr_segs,
			    size_t total_size)
{
	int ret = 0;
	struct iov_iter iter;

	/*
	 * copy data to application/kernel by pushing it out to
	 * the iovec. NOTE: target buffers can be addresses or
	 * struct page pointers.
	 */
	if (total_size) {
		iov_iter_init(&iter, READ, vec, nr_segs, total_size);
		ret = pvfs_bufmap_copy_to_iovec(bufmap,
						&iter,
						buffer_index);
		if (ret < 0)
			gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
				   __func__,
				   (long)ret);
	}
	return ret;
}

/*
 * Post and wait for the I/O upcall to finish
 */
static ssize_t wait_for_direct_io(enum PVFS_io_type type, struct inode *inode,
		loff_t *offset, struct iovec *vec, unsigned long nr_segs,
		size_t total_size, loff_t readahead_size)
{
	struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
	struct pvfs2_khandle *handle = &pvfs2_inode->refn.khandle;
	struct pvfs2_bufmap *bufmap = NULL;
	struct pvfs2_kernel_op_s *new_op = NULL;
	int buffer_index = -1;
	ssize_t ret;

	new_op = op_alloc(PVFS2_VFS_OP_FILE_IO);
	if (!new_op) {
		ret = -ENOMEM;
		goto out;
	}
	/* synchronous I/O */
	new_op->upcall.req.io.async_vfs_io = PVFS_VFS_SYNC_IO;
	new_op->upcall.req.io.readahead_size = readahead_size;
	new_op->upcall.req.io.io_type = type;
	new_op->upcall.req.io.refn = pvfs2_inode->refn;

populate_shared_memory:
	/* get a shared buffer index */
	ret = pvfs_bufmap_get(&bufmap, &buffer_index);
	if (ret < 0) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: pvfs_bufmap_get failure (%ld)\n",
			     __func__, (long)ret);
		goto out;
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): GET op %p -> buffer_index %d\n",
		     __func__,
		     handle,
		     new_op,
		     buffer_index);

	new_op->uses_shared_memory = 1;
	new_op->upcall.req.io.buf_index = buffer_index;
	new_op->upcall.req.io.count = total_size;
	new_op->upcall.req.io.offset = *offset;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): nr_segs %lu, offset: %llu total_size: %zd\n",
		     __func__,
		     handle,
		     nr_segs,
		     llu(*offset),
		     total_size);
	/*
	 * Stage 1: copy the buffers into client-core's address space
	 * precopy_buffers only pertains to writes.
	 */
	if (type == PVFS_IO_WRITE) {
		ret = precopy_buffers(bufmap,
				      buffer_index,
				      vec,
				      nr_segs,
				      total_size);
		if (ret < 0)
			goto out;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Calling post_io_request with tag (%llu)\n",
		     __func__,
		     handle,
		     llu(new_op->tag));

	/* Stage 2: Service the I/O operation */
	ret = service_operation(new_op,
				type == PVFS_IO_WRITE ?
					"file_write" :
					"file_read",
				get_interruptible_flag(inode));

	/*
	 * If service_operation() returns -EAGAIN #and# the operation was
	 * purged from pvfs2_request_list or htable_ops_in_progress, then
	 * we know that the client was restarted, causing the shared memory
	 * area to be wiped clean.  To restart a write operation in this
	 * case, we must re-copy the data from the user's iovec to a NEW
	 * shared memory location.  To restart a read operation, we must get
	 * a new shared memory location.
	 */
	if (ret == -EAGAIN && op_state_purged(new_op)) {
		pvfs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: going to repopulate_shared_memory.\n",
			     __func__);
		goto populate_shared_memory;
	}

	if (ret < 0) {
		handle_io_error();	/* defined in pvfs2-kernel.h */
		/*
		 * don't write an error to syslog on signaled operation
		 * termination unless we've got debugging turned on, as
		 * this can happen regularly (i.e. ctrl-c)
		 */
		if (ret == -EINTR)
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s: returning error %ld\n", __func__,
				     (long)ret);
		else
			gossip_err("%s: error in %s handle %pU, returning %zd\n",
				   __func__,
				   type == PVFS_IO_READ ?
					"read from" : "write to",
				   handle, ret);
		goto out;
	}

	/*
	 * Stage 3: Post copy buffers from client-core's address space
	 * postcopy_buffers only pertains to reads.
	 */
	if (type == PVFS_IO_READ) {
		ret = postcopy_buffers(bufmap,
				       buffer_index,
				       vec,
				       nr_segs,
				       new_op->downcall.resp.io.amt_complete);
		if (ret < 0) {
			/*
			 * put error codes in downcall so that
			 * handle_io_error() preserves it properly
			 */
			new_op->downcall.status = ret;
			handle_io_error();
			goto out;
		}
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Amount written as returned by the sys-io call:%d\n",
		     __func__,
		     handle,
		     (int)new_op->downcall.resp.io.amt_complete);

	ret = new_op->downcall.resp.io.amt_complete;

	/*
	 * tell the device file owner waiting on I/O that this read has
	 * completed and it can return now.  In this exact case, on
	 * wakeup the daemon will free the op, so we *cannot* touch it
	 * after this.
	 */
	wake_up_daemon_for_return(new_op);
	new_op = NULL;

out:
	if (buffer_index >= 0) {
		pvfs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): PUT buffer_index %d\n",
			     __func__, handle, buffer_index);
		buffer_index = -1;
	}
	if (new_op) {
		op_release(new_op);
		new_op = NULL;
	}
	return ret;
}

/*
 * The reason we need to do this is to be able to support readv and writev
 * calls that are larger than pvfs_bufmap_size_query() (the default is
 * PVFS2_BUFMAP_DEFAULT_DESC_SIZE MB).  What that means is that we will
 * create a new iovec descriptor for those memory addresses that
 * go beyond the limit.  The return value of this routine is negative in
 * case of errors and 0 in case of success.
 *
 * Further, the new_nr_segs pointer is updated to hold the new number
 * of iovecs, the new_vec pointer is updated to hold the pointer
 * to the new split iovec, and the size array is an array of integers
 * holding the number of iovecs that straddle pvfs_bufmap_size_query().
 * The max_new_nr_segs value is computed by the caller and passed in.
 * (It will be (count of all iov_len / block_size) + 1).
 */
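/*
 * For illustration (example values, not taken from the original sources):
 * with a 4 MB bufmap descriptor size and an incoming iovec array of
 * {3 MB, 2 MB}, the split produces {3 MB, 1 MB, 1 MB} with
 * seg_array = {2, 1}; the first staged transfer then consumes the first
 * two new segments (4 MB) and the second transfer consumes the remaining
 * 1 MB segment.
 */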
static int split_iovecs(unsigned long max_new_nr_segs,		/* IN */
			unsigned long nr_segs,			/* IN */
			const struct iovec *original_iovec,	/* IN */
			unsigned long *new_nr_segs,		/* OUT */
			struct iovec **new_vec,			/* OUT */
			unsigned long *seg_count,		/* OUT */
			unsigned long **seg_array)		/* OUT */
{
	unsigned long seg;
	unsigned long count = 0;
	unsigned long begin_seg;
	unsigned long tmpnew_nr_segs = 0;
	struct iovec *new_iovec = NULL;
	struct iovec *orig_iovec;
	unsigned long *sizes = NULL;
	unsigned long sizes_count = 0;

	if (nr_segs <= 0 ||
	    original_iovec == NULL ||
	    new_nr_segs == NULL ||
	    new_vec == NULL ||
	    seg_count == NULL ||
	    seg_array == NULL ||
	    max_new_nr_segs <= 0) {
		gossip_err("Invalid parameters to split_iovecs\n");
		return -EINVAL;
	}
	*new_nr_segs = 0;
	*new_vec = NULL;
	*seg_count = 0;
	*seg_array = NULL;
	/* copy the passed in iovec descriptor to a temp structure */
	orig_iovec = kmalloc_array(nr_segs,
				   sizeof(*orig_iovec),
				   PVFS2_BUFMAP_GFP_FLAGS);
	if (orig_iovec == NULL) {
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(nr_segs * sizeof(*orig_iovec)));
		return -ENOMEM;
	}
	new_iovec = kcalloc(max_new_nr_segs,
			    sizeof(*new_iovec),
			    PVFS2_BUFMAP_GFP_FLAGS);
	if (new_iovec == NULL) {
		kfree(orig_iovec);
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(max_new_nr_segs * sizeof(*new_iovec)));
		return -ENOMEM;
	}
	sizes = kcalloc(max_new_nr_segs,
			sizeof(*sizes),
			PVFS2_BUFMAP_GFP_FLAGS);
	if (sizes == NULL) {
		kfree(new_iovec);
		kfree(orig_iovec);
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(max_new_nr_segs * sizeof(*sizes)));
		return -ENOMEM;
	}
	/* copy the passed in iovec to a temp structure */
	memcpy(orig_iovec, original_iovec, nr_segs * sizeof(*orig_iovec));
	begin_seg = 0;
repeat:
	for (seg = begin_seg; seg < nr_segs; seg++) {
		if (tmpnew_nr_segs >= max_new_nr_segs ||
		    sizes_count >= max_new_nr_segs) {
			kfree(sizes);
			kfree(orig_iovec);
			kfree(new_iovec);
			gossip_err
			    ("split_iovecs: exceeded the index limit (%lu)\n",
			     tmpnew_nr_segs);
			return -EINVAL;
		}
		if (count + orig_iovec[seg].iov_len <
		    pvfs_bufmap_size_query()) {
			count += orig_iovec[seg].iov_len;
			memcpy(&new_iovec[tmpnew_nr_segs],
			       &orig_iovec[seg],
			       sizeof(*new_iovec));
			tmpnew_nr_segs++;
			sizes[sizes_count]++;
		} else {
			new_iovec[tmpnew_nr_segs].iov_base =
			    orig_iovec[seg].iov_base;
			new_iovec[tmpnew_nr_segs].iov_len =
			    (pvfs_bufmap_size_query() - count);
			tmpnew_nr_segs++;
			sizes[sizes_count]++;
			sizes_count++;
			begin_seg = seg;
			orig_iovec[seg].iov_base +=
			    (pvfs_bufmap_size_query() - count);
			orig_iovec[seg].iov_len -=
			    (pvfs_bufmap_size_query() - count);
			count = 0;
			break;
		}
	}
	if (seg != nr_segs)
		goto repeat;
	else
		sizes_count++;

	*new_nr_segs = tmpnew_nr_segs;
	/* new_iovec is freed by the caller */
	*new_vec = new_iovec;
	*seg_count = sizes_count;
	/* seg_array is also freed by the caller */
	*seg_array = sizes;
	kfree(orig_iovec);
	return 0;
}

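/*
 * Walk the iovec array and estimate how many segments it can expand into
 * once no descriptor is allowed to straddle pvfs_bufmap_size_query();
 * split_iovecs() uses this as its index limit.  The total byte count is
 * returned through total_count, and -EINVAL is returned if the running
 * byte count or any segment length overflows an ssize_t.
 */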
static long bound_max_iovecs(const struct iovec *curr, unsigned long nr_segs,
			     ssize_t *total_count)
{
	unsigned long i;
	long max_nr_iovecs;
	ssize_t total;
	ssize_t count;

	total = 0;
	count = 0;
	max_nr_iovecs = 0;
	for (i = 0; i < nr_segs; i++) {
		const struct iovec *iv = &curr[i];

		count += iv->iov_len;
		if (unlikely((ssize_t) (count | iv->iov_len) < 0))
			return -EINVAL;
		if (total + iv->iov_len < pvfs_bufmap_size_query()) {
			total += iv->iov_len;
			max_nr_iovecs++;
		} else {
			total =
			    (total + iv->iov_len - pvfs_bufmap_size_query());
			max_nr_iovecs += (total / pvfs_bufmap_size_query() + 2);
		}
	}
	*total_count = count;
	return max_nr_iovecs;
}

/*
 * Common entry point for read/write/readv/writev
 * This function will dispatch it to either the direct I/O
 * or buffered I/O path depending on the mount options and/or
 * augmented/extended metadata attached to the file.
 * Note: File extended attributes override any mount options.
 */
static ssize_t do_readv_writev(enum PVFS_io_type type, struct file *file,
		loff_t *offset, const struct iovec *iov, unsigned long nr_segs)
{
	struct inode *inode = file->f_mapping->host;
	struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
	struct pvfs2_khandle *handle = &pvfs2_inode->refn.khandle;
	ssize_t ret;
	ssize_t total_count;
	unsigned int to_free;
	ssize_t count;
	unsigned long seg;
	unsigned long new_nr_segs;
	unsigned long max_new_nr_segs;
	unsigned long seg_count;
	unsigned long *seg_array;
	struct iovec *iovecptr;
	struct iovec *ptr;

	total_count = 0;
	ret = -EINVAL;
	count = 0;
	to_free = 0;

	/* Compute total and max number of segments after split */
	max_new_nr_segs = bound_max_iovecs(iov, nr_segs, &count);
	if ((long)max_new_nr_segs < 0) {
		/* bound_max_iovecs() flagged an iovec length overflow */
		ret = (long)max_new_nr_segs;
		goto out;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s-BEGIN(%pU): count(%d) after bound_max_iovecs.\n",
		     __func__,
		     handle,
		     (int)count);

	if (type == PVFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): proceeding with offset : %llu, "
			     "size %d\n",
			     __func__,
			     handle,
			     llu(*offset),
			     (int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * if the total size of data transfer requested is greater than
	 * the kernel-set blocksize of PVFS2, then we split the iovecs
	 * such that no iovec description straddles a block size limit
	 */

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s: pvfs_bufmap_size:%d\n",
		     __func__,
		     pvfs_bufmap_size_query());

	if (count > pvfs_bufmap_size_query()) {
		/*
		 * Split up the given iovec description such that
		 * no iovec descriptor straddles the block-size limitation.
		 * This makes our job of staging the I/O easier.
		 * In addition, this function will also compute an array
		 * with seg_count entries that will store the number of
		 * segments that straddle the block-size boundaries.
		 */
		ret = split_iovecs(max_new_nr_segs,	/* IN */
				   nr_segs,		/* IN */
				   iov,			/* IN */
				   &new_nr_segs,	/* OUT */
				   &iovecptr,		/* OUT */
				   &seg_count,		/* OUT */
				   &seg_array);		/* OUT */
		if (ret < 0) {
			gossip_err("%s: Failed to split iovecs to satisfy larger than blocksize readv/writev request %zd\n",
				   __func__,
				   ret);
			goto out;
		}
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: Splitting iovecs from %lu to %lu"
			     " [max_new %lu]\n",
			     __func__,
			     nr_segs,
			     new_nr_segs,
			     max_new_nr_segs);
		/* We must free seg_array and iovecptr */
		to_free = 1;
	} else {
		new_nr_segs = nr_segs;
		/* use the given iovec description */
		iovecptr = (struct iovec *)iov;
		/* There is only 1 element in the seg_array */
		seg_count = 1;
		/* and its value is the number of segments passed in */
		seg_array = &nr_segs;
		/* We don't have to free anything */
		to_free = 0;
	}
	ptr = iovecptr;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU) %zd@%llu\n",
		     __func__,
		     handle,
		     count,
		     llu(*offset));
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): new_nr_segs: %lu, seg_count: %lu\n",
		     __func__,
		     handle,
		     new_nr_segs, seg_count);

	/* PVFS2_KERNEL_DEBUG is a CFLAGS define. */
#ifdef PVFS2_KERNEL_DEBUG
	for (seg = 0; seg < new_nr_segs; seg++)
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: %d) %p to %p [%d bytes]\n",
			     __func__,
			     (int)seg + 1,
			     iovecptr[seg].iov_base,
			     iovecptr[seg].iov_base + iovecptr[seg].iov_len,
			     (int)iovecptr[seg].iov_len);
	for (seg = 0; seg < seg_count; seg++)
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: %lu) %lu\n",
			     __func__,
			     seg + 1,
			     seg_array[seg]);
#endif
	seg = 0;
	while (total_count < count) {
		size_t each_count;
		size_t amt_complete;

		/* how much to transfer in this loop iteration */
		each_count =
		    (((count - total_count) > pvfs_bufmap_size_query()) ?
			pvfs_bufmap_size_query() :
			(count - total_count));

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): size of each_count(%d)\n",
			     __func__,
			     handle,
			     (int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): BEFORE wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		ret = wait_for_direct_io(type, inode, offset, ptr,
					 seg_array[seg], each_count, 0);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): return from wait_for_io:%d\n",
			     __func__,
			     handle,
			     (int)ret);

		if (ret < 0)
			goto out;

		/* advance the iovec pointer */
		ptr += seg_array[seg];
		seg++;
		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): AFTER wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		/*
		 * if we got a short I/O operation,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	}	/* end while */

	if (total_count > 0)
		ret = total_count;
out:
	if (to_free) {
		kfree(iovecptr);
		kfree(seg_array);
	}
	if (ret > 0) {
		if (type == PVFS_IO_READ) {
			file_accessed(file);
		} else {
			SetMtimeFlag(pvfs2_inode);
			inode->i_mtime = CURRENT_TIME;
			mark_inode_dirty_sync(inode);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%d) returned.\n",
		     __func__,
		     handle,
		     (int)ret);

	return ret;
}

/*
 * Read data from a specified offset in a file (referenced by inode).
 * Data may be placed either in a user or kernel buffer.
 */
ssize_t pvfs2_inode_read(struct inode *inode,
			 char __user *buf,
			 size_t count,
			 loff_t *offset,
			 loff_t readahead_size)
{
	struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
	size_t bufmap_size;
	struct iovec vec;
	ssize_t ret = -EINVAL;

	g_pvfs2_stats.reads++;

	vec.iov_base = buf;
	vec.iov_len = count;

	bufmap_size = pvfs_bufmap_size_query();
	if (count > bufmap_size) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: count is too large (%zd/%zd)!\n",
			     __func__, count, bufmap_size);
		return -EINVAL;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU) %zd@%llu\n",
		     __func__,
		     &pvfs2_inode->refn.khandle,
		     count,
		     llu(*offset));

	ret = wait_for_direct_io(PVFS_IO_READ, inode, offset, &vec, 1,
				 count, readahead_size);
	if (ret > 0)
		*offset += ret;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%zd) returned.\n",
		     __func__,
		     &pvfs2_inode->refn.khandle,
		     ret);

	return ret;
}

static ssize_t pvfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t rc = 0;
	unsigned long nr_segs = iter->nr_segs;

	BUG_ON(iocb->private);

	gossip_debug(GOSSIP_FILE_DEBUG, "pvfs2_file_read_iter\n");

	g_pvfs2_stats.reads++;

	rc = do_readv_writev(PVFS_IO_READ,
			     file,
			     &pos,
			     iter->iov,
			     nr_segs);
	iocb->ki_pos = pos;

	return rc;
}

static ssize_t pvfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	unsigned long nr_segs = iter->nr_segs;
	ssize_t rc;

	BUG_ON(iocb->private);

	gossip_debug(GOSSIP_FILE_DEBUG, "pvfs2_file_write_iter\n");

	mutex_lock(&file->f_mapping->host->i_mutex);

	/* Make sure generic_write_checks sees an up to date inode size. */
	if (file->f_flags & O_APPEND) {
		rc = pvfs2_inode_getattr(file->f_mapping->host,
					 PVFS_ATTR_SYS_SIZE);
		if (rc) {
			gossip_err("%s: pvfs2_inode_getattr failed, rc:%zd:.\n",
				   __func__, rc);
			goto out;
		}
	}

	if (file->f_pos > i_size_read(file->f_mapping->host))
		pvfs2_i_size_write(file->f_mapping->host, file->f_pos);

	rc = generic_write_checks(iocb, iter);

	if (rc <= 0) {
		gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
			   __func__, rc);
		goto out;
	}

	rc = do_readv_writev(PVFS_IO_WRITE,
			     file,
			     &pos,
			     iter->iov,
			     nr_segs);
	if (rc < 0) {
		gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
			   __func__, rc);
		goto out;
	}

	iocb->ki_pos = pos;
	g_pvfs2_stats.writes++;

out:
	mutex_unlock(&file->f_mapping->host->i_mutex);
	return rc;
}

/*
 * Perform a miscellaneous operation on a file.
 */
static long pvfs2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = -ENOTTY;
	__u64 val = 0;
	unsigned long uval;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "pvfs2_ioctl: called with cmd %d\n",
		     cmd);

	/*
	 * we understand some general ioctls on files, such as the immutable
	 * and append flags
	 */
	if (cmd == FS_IOC_GETFLAGS) {
		val = 0;
		ret = pvfs2_xattr_get_default(file->f_path.dentry,
					      "user.pvfs2.meta_hint",
					      &val,
					      sizeof(val),
					      0);
		if (ret < 0 && ret != -ENODATA)
			return ret;
		else if (ret == -ENODATA)
			val = 0;
		uval = val;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "pvfs2_ioctl: FS_IOC_GETFLAGS: %llu\n",
			     (unsigned long long)uval);
		return put_user(uval, (int __user *)arg);
	} else if (cmd == FS_IOC_SETFLAGS) {
		ret = 0;
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		/*
		 * PVFS_MIRROR_FL is set internally when the mirroring mode
		 * is turned on for a file. The user is not allowed to turn
		 * on this bit, but the bit is present if the user first gets
		 * the flags and then updates the flags with some new
		 * settings. So, we ignore it in the following edit. bligon.
		 */
		if ((uval & ~PVFS_MIRROR_FL) &
		    (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
			gossip_err("pvfs2_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
			return -EINVAL;
		}
		val = uval;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "pvfs2_ioctl: FS_IOC_SETFLAGS: %llu\n",
			     (unsigned long long)val);
		ret = pvfs2_xattr_set_default(file->f_path.dentry,
					      "user.pvfs2.meta_hint",
					      &val,
					      sizeof(val),
					      0,
					      0);
	}

	return ret;
}

/*
 * Memory map a region of a file.
 */
static int pvfs2_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "pvfs2_file_mmap: called on %s\n",
		     (file ?
		      (char *)file->f_path.dentry->d_name.name :
		      (char *)"Unknown"));

	/* set the sequential readahead hint */
	vma->vm_flags |= VM_SEQ_READ;
	vma->vm_flags &= ~VM_RAND_READ;

	/* Use readonly mmap since we cannot support writable maps. */
	return generic_file_readonly_mmap(file, vma);
}

#define mapping_nrpages(idata) ((idata)->nrpages)

/*
 * Called to notify the module that there are no more references to
 * this file (i.e. no processes have it open).
 *
 * \note Not called when each file is closed.
 */
static int pvfs2_file_release(struct inode *inode, struct file *file)
{
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "pvfs2_file_release: called on %s\n",
		     file->f_path.dentry->d_name.name);

	pvfs2_flush_inode(inode);

	/*
	 * remove all associated inode pages from the page cache and mmap
	 * readahead cache (if any); this forces an expensive refresh of
	 * data for the next caller of mmap (or 'get_block' accesses)
	 */
	if (file->f_path.dentry->d_inode &&
	    file->f_path.dentry->d_inode->i_mapping &&
	    mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
		truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
				     0);
	return 0;
}

/*
 * Push all data for a specific file onto permanent storage.
 */
static int pvfs2_fsync(struct file *file,
		       loff_t start,
		       loff_t end,
		       int datasync)
{
	int ret = -EINVAL;
	struct pvfs2_inode_s *pvfs2_inode =
		PVFS2_I(file->f_path.dentry->d_inode);
	struct pvfs2_kernel_op_s *new_op = NULL;

	/* required call */
	filemap_write_and_wait_range(file->f_mapping, start, end);

	new_op = op_alloc(PVFS2_VFS_OP_FSYNC);
	if (!new_op)
		return -ENOMEM;
	new_op->upcall.req.fsync.refn = pvfs2_inode->refn;

	ret = service_operation(new_op,
			"pvfs2_fsync",
			get_interruptible_flag(file->f_path.dentry->d_inode));

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "pvfs2_fsync got return value of %d\n",
		     ret);

	op_release(new_op);

	pvfs2_flush_inode(file->f_path.dentry->d_inode);
	return ret;
}

/*
 * Change the file pointer position for an instance of an open file.
 *
 * \note If .llseek is overridden, we must acquire lock as described in
 *       Documentation/filesystems/Locking.
 *
 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
 * require many changes to the FS.
 */
static loff_t pvfs2_file_llseek(struct file *file, loff_t offset, int origin)
{
	int ret = -EINVAL;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (!inode) {
		gossip_err("pvfs2_file_llseek: invalid inode (NULL)\n");
		return ret;
	}

	if (origin == PVFS2_SEEK_END) {
		/*
		 * revalidate the inode's file size.
		 * NOTE: We are only interested in file size here,
		 * so we set mask accordingly.
		 */
		ret = pvfs2_inode_getattr(inode, PVFS_ATTR_SYS_SIZE);
		if (ret) {
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s:%s:%d calling make bad inode\n",
				     __FILE__,
				     __func__,
				     __LINE__);
			pvfs2_make_bad_inode(inode);
			return ret;
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "pvfs2_file_llseek: offset is %ld | origin is %d | "
		     "inode size is %lu\n",
		     (long)offset,
		     origin,
		     (unsigned long)file->f_path.dentry->d_inode->i_size);

	return generic_file_llseek(file, offset, origin);
}

/*
 * Support local locks (locks that only this kernel knows about)
 * if Orangefs was mounted -o local_lock.
 */
static int pvfs2_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int rc = -EINVAL;

	if (PVFS2_SB(filp->f_inode->i_sb)->flags & PVFS2_OPT_LOCAL_LOCK) {
		if (cmd == F_GETLK) {
			rc = 0;
			posix_test_lock(filp, fl);
		} else {
			rc = posix_lock_file(filp, fl, NULL);
		}
	}

	return rc;
}

/** PVFS2 implementation of VFS file operations */
const struct file_operations pvfs2_file_operations = {
	.llseek		= pvfs2_file_llseek,
	.read_iter	= pvfs2_file_read_iter,
	.write_iter	= pvfs2_file_write_iter,
	.lock		= pvfs2_lock,
	.unlocked_ioctl	= pvfs2_ioctl,
	.mmap		= pvfs2_file_mmap,
	.open		= generic_file_open,
	.release	= pvfs2_file_release,
	.fsync		= pvfs2_fsync,
};