#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

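        /*
         * Opens that may modify the inode (write, create, truncate) are
         * sent to the authoritative MDS; read-only opens can be handled
         * by any MDS.
         */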
        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
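                /* readdir offsets 0 and 1 are reserved for "." and ".." */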
                cf->next_offset = 2;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        struct inode *parent_inode = NULL;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
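                /* snapped (read-only) inodes track their caps in i_snap_caps */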
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        if (flags & (O_CREAT|O_TRUNC))
                parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        iput(parent_inode);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
struct file *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
                              struct opendata *od, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct file *file = NULL;
        struct ceph_mds_request *req;
        struct dentry *ret;
        int err;

        dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
             dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        }
        req->r_locked_dir = dir;  /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out;
        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        if (err)
                goto out;
        file = finish_open(od, req->r_dentry, ceph_open);
        if (IS_ERR(file))
                err = PTR_ERR(file);
out:
        ret = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);
        dout("ceph_lookup_open result=%p\n", ret);

        if (IS_ERR(ret))
                return ERR_CAST(ret);

        dput(ret);
        return err ? ERR_PTR(err) : file;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        dput(cf->dentry);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we only
 * return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof, bool o_direct,
                        unsigned long buf_align)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len;
        int io_align, page_align;
        int left, pages_left;
        int read;
        struct page **page_pos;
        int ret;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;
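        /* io_align: offset of the start of the read within a page */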
        io_align = off & ~PAGE_MASK;

more:
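        /*
         * page_align is where this pass's data should land within the
         * first page of the page vector: for O_DIRECT it follows the
         * alignment of the user buffer, for buffered reads it is the
         * file offset within the page.
         */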
        if (o_direct)
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
        else
                page_align = pos & ~PAGE_MASK;
        this_len = left;
        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
        if (ret == -ENOENT)
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        if (ret > 0) {
                int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;

                if (read < pos - off) {
                        dout(" zero gap %llu to %llu\n", off + read, pos);
                        ceph_zero_page_vector_range(page_align + read,
                                                    pos - off - read, pages);
                }
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe? */
                if (left && hit_stripe)
                        goto more;
        }

        if (was_short) {
                /* did we bounce off eof? */
                if (pos + left > inode->i_size)
                        *checkeof = 1;

                /* zero trailing bytes (inside i_size) */
                if (left > 0 && pos < inode->i_size) {
                        if (pos + left > inode->i_size)
                                left = inode->i_size - pos;

                        dout("zero tail %d\n", left);
                        ceph_zero_page_vector_range(page_align + read, left,
                                                    pages);
                        read += left;
                }
        }

        if (ret >= 0)
                ret = read;
        dout("striped_read returns %d\n", ret);
        return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans an object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
                              unsigned len, loff_t *poff, int *checkeof)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct page **pages;
        u64 off = *poff;
        int num_pages, ret;

        dout("sync_read on file %p %llu~%u %s\n", file, off, len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

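        /*
         * For O_DIRECT, pin the caller's pages and have the OSD client
         * read into them directly; otherwise read into a temporary page
         * vector and copy the result out to userspace below.
         */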
        if (file->f_flags & O_DIRECT) {
                num_pages = calc_pages_for((unsigned long)data, len);
                pages = ceph_get_direct_page_vector(data, num_pages, true);
        } else {
                num_pages = calc_pages_for(off, len);
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        }
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret < 0)
                goto done;

        ret = striped_read(inode, off, len, pages, num_pages, checkeof,
                           file->f_flags & O_DIRECT,
                           (unsigned long)data & ~PAGE_MASK);

        if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
                ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
        if (ret >= 0)
                *poff = off + ret;

done:
        if (file->f_flags & O_DIRECT)
                ceph_put_page_vector(pages, num_pages, true);
        else
                ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
                              struct ceph_msg *msg)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
        spin_lock(&ci->i_unsafe_lock);
        list_del_init(&req->r_unsafe_item);
        spin_unlock(&ci->i_unsafe_lock);
        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If the write spans an object boundary, just do multiple writes.  (For
 * a correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
                               size_t left, loff_t *offset)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages;
        long long unsigned pos;
        u64 len;
        int written = 0;
        int flags;
        int do_sync = 0;
        int check_caps = 0;
        int page_align, io_align;
        unsigned long buf_align;
        int ret;
        struct timespec mtime = CURRENT_TIME;

        if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u %s\n", file, *offset,
             (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_APPEND)
                pos = i_size_read(inode);
        else
                pos = *offset;

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + left) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

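        /*
         * Always ask the OSD for an on-disk (ONDISK) commit.  For plain
         * (non-O_SYNC, non-O_DIRECT) writes also request an in-memory ACK
         * so we can return early and clean up via the safe callback once
         * the commit arrives; otherwise wait for the on-disk commit itself.
         */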
        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
        if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
                flags |= CEPH_OSD_FLAG_ACK;
        else
                do_sync = 1;

        /*
         * we may need to do multiple writes here if we span an object
         * boundary.  this isn't atomic, unfortunately.  :(
         */
more:
        io_align = pos & ~PAGE_MASK;
        buf_align = (unsigned long)data & ~PAGE_MASK;
        len = left;
        if (file->f_flags & O_DIRECT) {
                /* write from beginning of first page, regardless of
                   io alignment */
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
                num_pages = calc_pages_for((unsigned long)data, len);
        } else {
                page_align = pos & ~PAGE_MASK;
                num_pages = calc_pages_for(pos, len);
        }
        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), pos, &len,
                                    CEPH_OSD_OP_WRITE, flags,
                                    ci->i_snap_realm->cached_context,
                                    do_sync,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    &mtime, false, 2, page_align);
        if (!req)
                return -ENOMEM;

        if (file->f_flags & O_DIRECT) {
                pages = ceph_get_direct_page_vector(data, num_pages, false);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                /*
                 * throw out any page cache pages in this range.  this
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
                                           (pos+len) | (PAGE_CACHE_SIZE-1));
        } else {
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
                ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                if ((file->f_flags & O_SYNC) == 0) {
                        /* get a second commit callback */
                        req->r_safe_callback = sync_write_commit;
                        req->r_own_pages = 1;
                }
        }
        req->r_pages = pages;
        req->r_num_pages = num_pages;
        req->r_inode = inode;

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                if (req->r_safe_callback) {
                        /*
                         * Add to inode unsafe list only after we
                         * start_request so that a tid has been assigned.
                         */
                        spin_lock(&ci->i_unsafe_lock);
                        list_add_tail(&req->r_unsafe_item,
                                      &ci->i_unsafe_writes);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                }

                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
                if (ret < 0 && req->r_safe_callback) {
                        spin_lock(&ci->i_unsafe_lock);
                        list_del_init(&req->r_unsafe_item);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
                }
        }

        if (file->f_flags & O_DIRECT)
                ceph_put_page_vector(pages, num_pages, false);
        else if (file->f_flags & O_SYNC)
                ceph_release_page_vector(pages, num_pages);

out:
        ceph_osdc_put_request(req);
        if (ret == 0) {
                pos += len;
                written += len;
                left -= len;
                data += len;
                if (left)
                        goto more;

                ret = written;
                *offset = pos;
                if (pos > i_size_read(inode))
                        check_caps = ceph_inode_set_size(inode, pos);
                if (check_caps)
                        ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
                                        NULL);
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        loff_t *ppos = &iocb->ki_pos;
        size_t len = iov->iov_len;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        void __user *base = iov->iov_base;
        ssize_t ret;
        int want, got = 0;
        int checkeof = 0, read = 0;

        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
        __ceph_do_pending_vmtruncate(inode);
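        /*
         * Normally we want Fc (cache) caps so we can use the page cache;
         * a LAZYIO open also accepts Fl, which allows cached reads to
         * proceed with relaxed consistency while other clients write.
         */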
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
        if (ret < 0)
                goto out;
        dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
            (fi->flags & CEPH_F_SYNC))
                /* hmm, this isn't really async... */
                ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
        else
                ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);

        if (checkeof && ret >= 0) {
                int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

                /* hit EOF or hole? */
                if (statret == 0 && *ppos < inode->i_size) {
                        dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n",
                             *ppos, inode->i_size);
                        read += ret;
                        base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }
        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write, but _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        loff_t endoff = pos + iov->iov_len;
        int want, got = 0;
        int ret, err;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
                return -ENOSPC;
        __ceph_do_pending_vmtruncate(inode);
        dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             inode->i_size);
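        /*
         * Ask for Fb (buffer) caps, plus Fl for lazy opens.  endoff tells
         * the cap code how far we intend to write so it can be checked
         * against the max size the MDS has granted for this inode.
         */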
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
        if (ret < 0)
                goto out_put;

        dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
            (fi->flags & CEPH_F_SYNC)) {
                ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
                                      &iocb->ki_pos);
        } else {
                /*
                 * buffered write; drop Fw early to avoid slow
                 * revocation if we get stuck on balance_dirty_pages
                 */
                int dirty;

                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&ci->i_ceph_lock);
                ceph_put_cap_refs(ci, got);

                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
                if ((ret >= 0 || ret == -EIOCBQUEUED) &&
                    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
                     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                        err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
                        if (err < 0)
                                ret = err;
                }

                if (dirty)
                        __mark_inode_dirty(inode, dirty);
                goto out;
        }

        if (ret >= 0) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

out_put:
        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

out:
        if (ret == -EOLDSNAPC) {
                dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
                     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
                goto retry_snap;
        }

        return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);

        if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
        }

        switch (origin) {
        case SEEK_END:
                offset += inode->i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
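        /*
         * We don't track holes here, so treat the whole file as data
         * with a single virtual hole at EOF (the generic
         * SEEK_DATA/SEEK_HOLE fallback behaviour).
         */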
        case SEEK_DATA:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                offset = inode->i_size;
                break;
        }

        if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
                offset = -EINVAL;
                goto out;
        }

        /* Special lock needed here? */
        if (offset != file->f_pos) {
                file->f_pos = offset;
                file->f_version = 0;
        }

out:
        mutex_unlock(&inode->i_mutex);
        return offset;
}

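/*
 * Plain read(2)/write(2) calls are routed through the generic
 * do_sync_read/do_sync_write helpers, which invoke our aio_read/aio_write
 * methods synchronously.
 */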
const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = ceph_aio_read,
        .aio_write = ceph_aio_write,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
};