Commit | Line | Data |
---|---|---|
3d14c5d2 | 1 | #include <linux/ceph/ceph_debug.h> |
2f2dc053 | 2 | |
496e5955 | 3 | #include <linux/fs.h> |
2f2dc053 | 4 | #include <linux/wait.h> |
5a0e3ad6 | 5 | #include <linux/slab.h> |
54008399 | 6 | #include <linux/gfp.h> |
2f2dc053 | 7 | #include <linux/sched.h> |
3d14c5d2 YS |
8 | #include <linux/debugfs.h> |
9 | #include <linux/seq_file.h> | |
dbd0c8bf | 10 | #include <linux/utsname.h> |
2f2dc053 | 11 | |
2f2dc053 | 12 | #include "super.h" |
3d14c5d2 YS |
13 | #include "mds_client.h" |
14 | ||
1fe60e51 | 15 | #include <linux/ceph/ceph_features.h> |
3d14c5d2 YS |
16 | #include <linux/ceph/messenger.h> |
17 | #include <linux/ceph/decode.h> | |
18 | #include <linux/ceph/pagelist.h> | |
19 | #include <linux/ceph/auth.h> | |
20 | #include <linux/ceph/debugfs.h> | |
2f2dc053 SW |
21 | |
22 | /* | |
23 | * A cluster of MDS (metadata server) daemons is responsible for | |
24 | * managing the file system namespace (the directory hierarchy and | |
25 | * inodes) and for coordinating shared access to storage. Metadata is | |
26 | * partitioned hierarchically across a number of servers, and that | |
27 | * partition varies over time as the cluster adjusts the distribution | |
28 | * in order to balance load. | |
29 | * | |
30 | * The MDS client is primarily responsible for managing synchronous | |
31 | * metadata requests for operations like open, unlink, and so forth. | |
32 | * If there is an MDS failure, we find out about it when we (possibly | |
33 | * request and) receive a new MDS map, and can resubmit affected | |
34 | * requests. | |
35 | * | |
36 | * For the most part, though, we take advantage of a lossless | |
37 | * communications channel to the MDS, and do not need to worry about | |
38 | * timing out or resubmitting requests. | |
39 | * | |
40 | * We maintain a stateful "session" with each MDS we interact with. | |
41 | * Within each session, we send periodic heartbeat messages to ensure | |
42 | * any capabilities or leases we have been issued remain valid. If | |
43 | * the session times out and goes stale, our leases and capabilities | |
44 | * are no longer valid. | |
45 | */ | |
46 | ||
20cb34ae | 47 | struct ceph_reconnect_state { |
44c99757 | 48 | int nr_caps; |
20cb34ae SW |
49 | struct ceph_pagelist *pagelist; |
50 | bool flock; | |
51 | }; | |
52 | ||
2f2dc053 SW |
53 | static void __wake_requests(struct ceph_mds_client *mdsc, |
54 | struct list_head *head); | |
55 | ||
9e32789f | 56 | static const struct ceph_connection_operations mds_con_ops; |
2f2dc053 SW |
57 | |
58 | ||
59 | /* | |
60 | * mds reply parsing | |
61 | */ | |
62 | ||
63 | /* | |
64 | * parse individual inode info | |
65 | */ | |
66 | static int parse_reply_info_in(void **p, void *end, | |
14303d20 | 67 | struct ceph_mds_reply_info_in *info, |
12b4629a | 68 | u64 features) |
2f2dc053 SW |
69 | { |
70 | int err = -EIO; | |
71 | ||
72 | info->in = *p; | |
73 | *p += sizeof(struct ceph_mds_reply_inode) + | |
74 | sizeof(*info->in->fragtree.splits) * | |
75 | le32_to_cpu(info->in->fragtree.nsplits); | |
76 | ||
77 | ceph_decode_32_safe(p, end, info->symlink_len, bad); | |
78 | ceph_decode_need(p, end, info->symlink_len, bad); | |
79 | info->symlink = *p; | |
80 | *p += info->symlink_len; | |
81 | ||
14303d20 SW |
82 | if (features & CEPH_FEATURE_DIRLAYOUTHASH) |
83 | ceph_decode_copy_safe(p, end, &info->dir_layout, | |
84 | sizeof(info->dir_layout), bad); | |
85 | else | |
86 | memset(&info->dir_layout, 0, sizeof(info->dir_layout)); | |
87 | ||
2f2dc053 SW |
88 | ceph_decode_32_safe(p, end, info->xattr_len, bad); |
89 | ceph_decode_need(p, end, info->xattr_len, bad); | |
90 | info->xattr_data = *p; | |
91 | *p += info->xattr_len; | |
fb01d1f8 YZ |
92 | |
93 | if (features & CEPH_FEATURE_MDS_INLINE_DATA) { | |
94 | ceph_decode_64_safe(p, end, info->inline_version, bad); | |
95 | ceph_decode_32_safe(p, end, info->inline_len, bad); | |
96 | ceph_decode_need(p, end, info->inline_len, bad); | |
97 | info->inline_data = *p; | |
98 | *p += info->inline_len; | |
99 | } else | |
100 | info->inline_version = CEPH_INLINE_NONE; | |
101 | ||
2f2dc053 SW |
102 | return 0; |
103 | bad: | |
104 | return err; | |
105 | } | |
106 | ||
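The decode helpers above follow a single pattern used throughout this file: a cursor `*p` advances through `[p, end)`, and every `ceph_decode_*_safe()` call checks the remaining length before reading, jumping to the `bad` label (and ultimately returning `-EIO`) on a truncated message. Below is a minimal userspace sketch of that pattern; the helper names are illustrative, not the kernel macros, and endianness conversion (`le32_to_cpu`) is omitted.

```c
#include <stdint.h>
#include <string.h>

/* read a u32 at *p if at least 4 bytes remain before end */
static int decode_32_safe(void **p, void *end, uint32_t *v)
{
	if ((size_t)((char *)end - (char *)*p) < sizeof(*v))
		return -1;		/* truncated: caller bails out */
	memcpy(v, *p, sizeof(*v));	/* unaligned-safe read */
	*p = (char *)*p + sizeof(*v);
	return 0;
}

/* parse a length-prefixed blob, pointing into the buffer (no copy) */
static int parse_blob(void *buf, size_t len,
		      const void **data, uint32_t *data_len)
{
	void *p = buf, *end = (char *)buf + len;
	uint32_t n;

	if (decode_32_safe(&p, end, &n))
		goto bad;		/* length prefix missing */
	if ((size_t)((char *)end - (char *)p) < n)
		goto bad;		/* payload shorter than advertised */
	*data = p;
	*data_len = n;
	return 0;
bad:
	return -1;			/* mirrors the -EIO convention above */
}
```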
107 | /* | |
108 | * parse a normal reply, which may contain a (dir+)dentry and/or a | |
109 | * target inode. | |
110 | */ | |
111 | static int parse_reply_info_trace(void **p, void *end, | |
14303d20 | 112 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 113 | u64 features) |
2f2dc053 SW |
114 | { |
115 | int err; | |
116 | ||
117 | if (info->head->is_dentry) { | |
14303d20 | 118 | err = parse_reply_info_in(p, end, &info->diri, features); |
2f2dc053 SW |
119 | if (err < 0) |
120 | goto out_bad; | |
121 | ||
122 | if (unlikely(*p + sizeof(*info->dirfrag) > end)) | |
123 | goto bad; | |
124 | info->dirfrag = *p; | |
125 | *p += sizeof(*info->dirfrag) + | |
126 | sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); | |
127 | if (unlikely(*p > end)) | |
128 | goto bad; | |
129 | ||
130 | ceph_decode_32_safe(p, end, info->dname_len, bad); | |
131 | ceph_decode_need(p, end, info->dname_len, bad); | |
132 | info->dname = *p; | |
133 | *p += info->dname_len; | |
134 | info->dlease = *p; | |
135 | *p += sizeof(*info->dlease); | |
136 | } | |
137 | ||
138 | if (info->head->is_target) { | |
14303d20 | 139 | err = parse_reply_info_in(p, end, &info->targeti, features); |
2f2dc053 SW |
140 | if (err < 0) |
141 | goto out_bad; | |
142 | } | |
143 | ||
144 | if (unlikely(*p != end)) | |
145 | goto bad; | |
146 | return 0; | |
147 | ||
148 | bad: | |
149 | err = -EIO; | |
150 | out_bad: | |
151 | pr_err("problem parsing mds trace %d\n", err); | |
152 | return err; | |
153 | } | |
154 | ||
155 | /* | |
156 | * parse readdir results | |
157 | */ | |
158 | static int parse_reply_info_dir(void **p, void *end, | |
14303d20 | 159 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 160 | u64 features) |
2f2dc053 SW |
161 | { |
162 | u32 num, i = 0; | |
163 | int err; | |
164 | ||
165 | info->dir_dir = *p; | |
166 | if (*p + sizeof(*info->dir_dir) > end) | |
167 | goto bad; | |
168 | *p += sizeof(*info->dir_dir) + | |
169 | sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); | |
170 | if (*p > end) | |
171 | goto bad; | |
172 | ||
173 | ceph_decode_need(p, end, sizeof(num) + 2, bad); | |
c89136ea SW |
174 | num = ceph_decode_32(p); |
175 | info->dir_end = ceph_decode_8(p); | |
176 | info->dir_complete = ceph_decode_8(p); | |
2f2dc053 SW |
177 | if (num == 0) |
178 | goto done; | |
179 | ||
54008399 | 180 | BUG_ON(!info->dir_in); |
2f2dc053 SW |
181 | info->dir_dname = (void *)(info->dir_in + num); |
182 | info->dir_dname_len = (void *)(info->dir_dname + num); | |
183 | info->dir_dlease = (void *)(info->dir_dname_len + num); | |
54008399 YZ |
184 | if ((unsigned long)(info->dir_dlease + num) > |
185 | (unsigned long)info->dir_in + info->dir_buf_size) { | |
186 | pr_err("dir contents are larger than expected\n"); | |
187 | WARN_ON(1); | |
188 | goto bad; | |
189 | } | |
2f2dc053 | 190 | |
54008399 | 191 | info->dir_nr = num; |
2f2dc053 SW |
192 | while (num) { |
193 | /* dentry */ | |
194 | ceph_decode_need(p, end, sizeof(u32)*2, bad); | |
c89136ea | 195 | info->dir_dname_len[i] = ceph_decode_32(p); |
2f2dc053 SW |
196 | ceph_decode_need(p, end, info->dir_dname_len[i], bad); |
197 | info->dir_dname[i] = *p; | |
198 | *p += info->dir_dname_len[i]; | |
199 | dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i], | |
200 | info->dir_dname[i]); | |
201 | info->dir_dlease[i] = *p; | |
202 | *p += sizeof(struct ceph_mds_reply_lease); | |
203 | ||
204 | /* inode */ | |
14303d20 | 205 | err = parse_reply_info_in(p, end, &info->dir_in[i], features); |
2f2dc053 SW |
206 | if (err < 0) |
207 | goto out_bad; | |
208 | i++; | |
209 | num--; | |
210 | } | |
211 | ||
212 | done: | |
213 | if (*p != end) | |
214 | goto bad; | |
215 | return 0; | |
216 | ||
217 | bad: | |
218 | err = -EIO; | |
219 | out_bad: | |
220 | pr_err("problem parsing dir contents %d\n", err); | |
221 | return err; | |
222 | } | |
223 | ||
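`info->dir_in`, `dir_dname`, `dir_dname_len`, and `dir_dlease` above are parallel arrays carved out of a single allocation (sized in `ceph_alloc_readdir_reply_buffer()` near the end of this file), which is why the code can bounds-check against `dir_buf_size` with plain pointer arithmetic. A simplified userspace sketch of that single-buffer layout follows, with stand-in types; unlike the kernel layout, the pointer arrays are placed before the `u32` array here to keep natural alignment.

```c
#include <stdlib.h>

struct entry { void *stub; };	/* stand-in for ceph_mds_reply_info_in */

struct dir_bufs {
	void *buf;		/* one allocation backs all four arrays */
	struct entry *in;
	char **dname;
	void **dlease;
	unsigned int *dname_len;
};

static int dir_bufs_alloc(struct dir_bufs *d, size_t num)
{
	size_t size = num * (sizeof(*d->in) + sizeof(*d->dname) +
			     sizeof(*d->dlease) + sizeof(*d->dname_len));

	d->buf = malloc(size);
	if (!d->buf)
		return -1;
	d->in = d->buf;					  /* entries first */
	d->dname = (char **)(d->in + num);		  /* then name ptrs */
	d->dlease = (void **)(d->dname + num);		  /* then lease ptrs */
	d->dname_len = (unsigned int *)(d->dlease + num); /* u32s last */
	return 0;
}
```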
25933abd HS |
224 | /* |
225 | * parse fcntl F_GETLK results | |
226 | */ | |
227 | static int parse_reply_info_filelock(void **p, void *end, | |
14303d20 | 228 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 229 | u64 features) |
25933abd HS |
230 | { |
231 | if (*p + sizeof(*info->filelock_reply) > end) | |
232 | goto bad; | |
233 | ||
234 | info->filelock_reply = *p; | |
235 | *p += sizeof(*info->filelock_reply); | |
236 | ||
237 | if (unlikely(*p != end)) | |
238 | goto bad; | |
239 | return 0; | |
240 | ||
241 | bad: | |
242 | return -EIO; | |
243 | } | |
244 | ||
6e8575fa SL |
245 | /* |
246 | * parse create results | |
247 | */ | |
248 | static int parse_reply_info_create(void **p, void *end, | |
249 | struct ceph_mds_reply_info_parsed *info, | |
12b4629a | 250 | u64 features) |
6e8575fa SL |
251 | { |
252 | if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { | |
253 | if (*p == end) { | |
254 | info->has_create_ino = false; | |
255 | } else { | |
256 | info->has_create_ino = true; | |
257 | info->ino = ceph_decode_64(p); | |
258 | } | |
259 | } | |
260 | ||
261 | if (unlikely(*p != end)) | |
262 | goto bad; | |
263 | return 0; | |
264 | ||
265 | bad: | |
266 | return -EIO; | |
267 | } | |
268 | ||
25933abd HS |
269 | /* |
270 | * parse extra results | |
271 | */ | |
272 | static int parse_reply_info_extra(void **p, void *end, | |
14303d20 | 273 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 274 | u64 features) |
25933abd HS |
275 | { |
276 | if (info->head->op == CEPH_MDS_OP_GETFILELOCK) | |
14303d20 | 277 | return parse_reply_info_filelock(p, end, info, features); |
8a034497 YZ |
278 | else if (info->head->op == CEPH_MDS_OP_READDIR || |
279 | info->head->op == CEPH_MDS_OP_LSSNAP) | |
14303d20 | 280 | return parse_reply_info_dir(p, end, info, features); |
6e8575fa SL |
281 | else if (info->head->op == CEPH_MDS_OP_CREATE) |
282 | return parse_reply_info_create(p, end, info, features); | |
283 | else | |
284 | return -EIO; | |
25933abd HS |
285 | } |
286 | ||
2f2dc053 SW |
287 | /* |
288 | * parse entire mds reply | |
289 | */ | |
290 | static int parse_reply_info(struct ceph_msg *msg, | |
14303d20 | 291 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 292 | u64 features) |
2f2dc053 SW |
293 | { |
294 | void *p, *end; | |
295 | u32 len; | |
296 | int err; | |
297 | ||
298 | info->head = msg->front.iov_base; | |
299 | p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); | |
300 | end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); | |
301 | ||
302 | /* trace */ | |
303 | ceph_decode_32_safe(&p, end, len, bad); | |
304 | if (len > 0) { | |
32852a81 | 305 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 306 | err = parse_reply_info_trace(&p, p+len, info, features); |
2f2dc053 SW |
307 | if (err < 0) |
308 | goto out_bad; | |
309 | } | |
310 | ||
25933abd | 311 | /* extra */ |
2f2dc053 SW |
312 | ceph_decode_32_safe(&p, end, len, bad); |
313 | if (len > 0) { | |
32852a81 | 314 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 315 | err = parse_reply_info_extra(&p, p+len, info, features); |
2f2dc053 SW |
316 | if (err < 0) |
317 | goto out_bad; | |
318 | } | |
319 | ||
320 | /* snap blob */ | |
321 | ceph_decode_32_safe(&p, end, len, bad); | |
322 | info->snapblob_len = len; | |
323 | info->snapblob = p; | |
324 | p += len; | |
325 | ||
326 | if (p != end) | |
327 | goto bad; | |
328 | return 0; | |
329 | ||
330 | bad: | |
331 | err = -EIO; | |
332 | out_bad: | |
333 | pr_err("mds parse_reply err %d\n", err); | |
334 | return err; | |
335 | } | |
336 | ||
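As `parse_reply_info()` shows, the reply body is three back-to-back sections, each prefixed with a `u32` length: trace, extra, and the snap blob; a zero length means the section is absent, and the parser insists that `p == end` once all three are consumed. For illustration, here is a sketch of the matching encoder side; the function names are hypothetical and byte-order conversion is omitted (the wire format is little-endian).

```c
#include <stdint.h>
#include <string.h>

/* emit one u32-length-prefixed section; len == 0 encodes "absent" */
static size_t put_section(char *out, const void *data, uint32_t len)
{
	memcpy(out, &len, sizeof(len));
	if (len)
		memcpy(out + sizeof(len), data, len);
	return sizeof(len) + len;
}

static size_t build_reply_body(char *out,
			       const void *trace, uint32_t trace_len,
			       const void *extra, uint32_t extra_len,
			       const void *snap, uint32_t snap_len)
{
	size_t off = 0;

	off += put_section(out + off, trace, trace_len);
	off += put_section(out + off, extra, extra_len);
	off += put_section(out + off, snap, snap_len);
	return off;	/* parse_reply_info() expects p == end here */
}
```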
337 | static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) | |
338 | { | |
54008399 YZ |
339 | if (!info->dir_in) |
340 | return; | |
341 | free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size)); | |
2f2dc053 SW |
342 | } |
343 | ||
344 | ||
345 | /* | |
346 | * sessions | |
347 | */ | |
a687ecaf | 348 | const char *ceph_session_state_name(int s) |
2f2dc053 SW |
349 | { |
350 | switch (s) { | |
351 | case CEPH_MDS_SESSION_NEW: return "new"; | |
352 | case CEPH_MDS_SESSION_OPENING: return "opening"; | |
353 | case CEPH_MDS_SESSION_OPEN: return "open"; | |
354 | case CEPH_MDS_SESSION_HUNG: return "hung"; | |
355 | case CEPH_MDS_SESSION_CLOSING: return "closing"; | |
44ca18f2 | 356 | case CEPH_MDS_SESSION_RESTARTING: return "restarting"; |
2f2dc053 SW |
357 | case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; |
358 | default: return "???"; | |
359 | } | |
360 | } | |
361 | ||
362 | static struct ceph_mds_session *get_session(struct ceph_mds_session *s) | |
363 | { | |
364 | if (atomic_inc_not_zero(&s->s_ref)) { | |
365 | dout("mdsc get_session %p %d -> %d\n", s, | |
366 | atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref)); | |
367 | return s; | |
368 | } else { | |
369 | dout("mdsc get_session %p 0 -- FAIL", s); | |
370 | return NULL; | |
371 | } | |
372 | } | |
373 | ||
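`get_session()` hinges on `atomic_inc_not_zero()`: a lookup can race with the final `ceph_put_mds_session()`, so the reference is taken only if the count has not already reached zero, never resurrecting an object that is being destroyed. A userspace C11 sketch of the same idiom:

```c
#include <stdatomic.h>
#include <stdbool.h>

/* take a reference only if the object is still alive (refcount > 0) */
static bool ref_get_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* on failure, old is reloaded and the test repeats */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;	/* count hit zero: destruction in progress */
}
```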
374 | void ceph_put_mds_session(struct ceph_mds_session *s) | |
375 | { | |
376 | dout("mdsc put_session %p %d -> %d\n", s, | |
377 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); | |
4e7a5dcd | 378 | if (atomic_dec_and_test(&s->s_ref)) { |
6c4a1915 | 379 | if (s->s_auth.authorizer) |
27859f97 SW |
380 | ceph_auth_destroy_authorizer( |
381 | s->s_mdsc->fsc->client->monc.auth, | |
382 | s->s_auth.authorizer); | |
2f2dc053 | 383 | kfree(s); |
4e7a5dcd | 384 | } |
2f2dc053 SW |
385 | } |
386 | ||
387 | /* | |
388 | * called under mdsc->mutex | |
389 | */ | |
390 | struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, | |
391 | int mds) | |
392 | { | |
393 | struct ceph_mds_session *session; | |
394 | ||
395 | if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) | |
396 | return NULL; | |
397 | session = mdsc->sessions[mds]; | |
398 | dout("lookup_mds_session %p %d\n", session, | |
399 | atomic_read(&session->s_ref)); | |
400 | get_session(session); | |
401 | return session; | |
402 | } | |
403 | ||
404 | static bool __have_session(struct ceph_mds_client *mdsc, int mds) | |
405 | { | |
406 | if (mds >= mdsc->max_sessions) | |
407 | return false; | |
408 | return mdsc->sessions[mds]; | |
409 | } | |
410 | ||
2600d2dd SW |
411 | static int __verify_registered_session(struct ceph_mds_client *mdsc, |
412 | struct ceph_mds_session *s) | |
413 | { | |
414 | if (s->s_mds >= mdsc->max_sessions || | |
415 | mdsc->sessions[s->s_mds] != s) | |
416 | return -ENOENT; | |
417 | return 0; | |
418 | } | |
419 | ||
2f2dc053 SW |
420 | /* |
421 | * create+register a new session for given mds. | |
422 | * called under mdsc->mutex. | |
423 | */ | |
424 | static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |
425 | int mds) | |
426 | { | |
427 | struct ceph_mds_session *s; | |
428 | ||
c338c07c NY |
429 | if (mds >= mdsc->mdsmap->m_max_mds) |
430 | return ERR_PTR(-EINVAL); | |
431 | ||
2f2dc053 | 432 | s = kzalloc(sizeof(*s), GFP_NOFS); |
4736b009 DC |
433 | if (!s) |
434 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
435 | s->s_mdsc = mdsc; |
436 | s->s_mds = mds; | |
437 | s->s_state = CEPH_MDS_SESSION_NEW; | |
438 | s->s_ttl = 0; | |
439 | s->s_seq = 0; | |
440 | mutex_init(&s->s_mutex); | |
441 | ||
b7a9e5dd | 442 | ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); |
2f2dc053 | 443 | |
d8fb02ab | 444 | spin_lock_init(&s->s_gen_ttl_lock); |
2f2dc053 | 445 | s->s_cap_gen = 0; |
1ce208a6 | 446 | s->s_cap_ttl = jiffies - 1; |
d8fb02ab AE |
447 | |
448 | spin_lock_init(&s->s_cap_lock); | |
2f2dc053 SW |
449 | s->s_renew_requested = 0; |
450 | s->s_renew_seq = 0; | |
451 | INIT_LIST_HEAD(&s->s_caps); | |
452 | s->s_nr_caps = 0; | |
5dacf091 | 453 | s->s_trim_caps = 0; |
2f2dc053 SW |
454 | atomic_set(&s->s_ref, 1); |
455 | INIT_LIST_HEAD(&s->s_waiting); | |
456 | INIT_LIST_HEAD(&s->s_unsafe); | |
457 | s->s_num_cap_releases = 0; | |
99a9c273 | 458 | s->s_cap_reconnect = 0; |
7c1332b8 | 459 | s->s_cap_iterator = NULL; |
2f2dc053 | 460 | INIT_LIST_HEAD(&s->s_cap_releases); |
2f2dc053 SW |
461 | INIT_LIST_HEAD(&s->s_cap_flushing); |
462 | INIT_LIST_HEAD(&s->s_cap_snaps_flushing); | |
463 | ||
464 | dout("register_session mds%d\n", mds); | |
465 | if (mds >= mdsc->max_sessions) { | |
466 | int newmax = 1 << get_count_order(mds+1); | |
467 | struct ceph_mds_session **sa; | |
468 | ||
469 | dout("register_session realloc to %d\n", newmax); | |
470 | sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); | |
471 | if (sa == NULL) | |
42ce56e5 | 472 | goto fail_realloc; |
2f2dc053 SW |
473 | if (mdsc->sessions) { |
474 | memcpy(sa, mdsc->sessions, | |
475 | mdsc->max_sessions * sizeof(void *)); | |
476 | kfree(mdsc->sessions); | |
477 | } | |
478 | mdsc->sessions = sa; | |
479 | mdsc->max_sessions = newmax; | |
480 | } | |
481 | mdsc->sessions[mds] = s; | |
86d8f67b | 482 | atomic_inc(&mdsc->num_sessions); |
2f2dc053 | 483 | atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ |
42ce56e5 | 484 | |
b7a9e5dd SW |
485 | ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, |
486 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
42ce56e5 | 487 | |
2f2dc053 | 488 | return s; |
42ce56e5 SW |
489 | |
490 | fail_realloc: | |
491 | kfree(s); | |
492 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
493 | } |
494 | ||
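Note how `register_session()` grows `mdsc->sessions[]`: `1 << get_count_order(mds + 1)` is the smallest power of two that can hold index `mds`, so the array doubles as ranks grow and reallocations stay logarithmic in the highest rank seen. A userspace equivalent, for illustration only:

```c
/* smallest power of two >= n; matches 1 << get_count_order(n) for n >= 1 */
static unsigned int next_pow2(unsigned int n)
{
	unsigned int v = 1;

	while (v < n)
		v <<= 1;
	return v;
}
/* next_pow2(1) == 1, next_pow2(5) == 8, next_pow2(8) == 8 */
```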
495 | /* | |
496 | * called under mdsc->mutex | |
497 | */ | |
2600d2dd | 498 | static void __unregister_session(struct ceph_mds_client *mdsc, |
42ce56e5 | 499 | struct ceph_mds_session *s) |
2f2dc053 | 500 | { |
2600d2dd SW |
501 | dout("__unregister_session mds%d %p\n", s->s_mds, s); |
502 | BUG_ON(mdsc->sessions[s->s_mds] != s); | |
42ce56e5 SW |
503 | mdsc->sessions[s->s_mds] = NULL; |
504 | ceph_con_close(&s->s_con); | |
505 | ceph_put_mds_session(s); | |
86d8f67b | 506 | atomic_dec(&mdsc->num_sessions); |
2f2dc053 SW |
507 | } |
508 | ||
509 | /* | |
510 | * drop session refs in request. | |
511 | * | |
512 | * should be last request ref, or hold mdsc->mutex | |
513 | */ | |
514 | static void put_request_session(struct ceph_mds_request *req) | |
515 | { | |
516 | if (req->r_session) { | |
517 | ceph_put_mds_session(req->r_session); | |
518 | req->r_session = NULL; | |
519 | } | |
520 | } | |
521 | ||
153c8e6b | 522 | void ceph_mdsc_release_request(struct kref *kref) |
2f2dc053 | 523 | { |
153c8e6b SW |
524 | struct ceph_mds_request *req = container_of(kref, |
525 | struct ceph_mds_request, | |
526 | r_kref); | |
54008399 | 527 | destroy_reply_info(&req->r_reply_info); |
153c8e6b SW |
528 | if (req->r_request) |
529 | ceph_msg_put(req->r_request); | |
54008399 | 530 | if (req->r_reply) |
153c8e6b | 531 | ceph_msg_put(req->r_reply); |
153c8e6b | 532 | if (req->r_inode) { |
41b02e1f | 533 | ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); |
153c8e6b SW |
534 | iput(req->r_inode); |
535 | } | |
536 | if (req->r_locked_dir) | |
41b02e1f | 537 | ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); |
e96a650a | 538 | iput(req->r_target_inode); |
153c8e6b SW |
539 | if (req->r_dentry) |
540 | dput(req->r_dentry); | |
844d87c3 SW |
541 | if (req->r_old_dentry) |
542 | dput(req->r_old_dentry); | |
543 | if (req->r_old_dentry_dir) { | |
41b02e1f SW |
544 | /* |
545 | * track (and drop pins for) r_old_dentry_dir | |
546 | * separately, since r_old_dentry's d_parent may have | |
547 | * changed between the dir mutex being dropped and | |
548 | * this request being freed. | |
549 | */ | |
550 | ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), | |
551 | CEPH_CAP_PIN); | |
41b02e1f | 552 | iput(req->r_old_dentry_dir); |
2f2dc053 | 553 | } |
153c8e6b SW |
554 | kfree(req->r_path1); |
555 | kfree(req->r_path2); | |
25e6bae3 YZ |
556 | if (req->r_pagelist) |
557 | ceph_pagelist_release(req->r_pagelist); | |
153c8e6b | 558 | put_request_session(req); |
37151668 | 559 | ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); |
153c8e6b | 560 | kfree(req); |
2f2dc053 SW |
561 | } |
562 | ||
563 | /* | |
564 | * lookup session, bump ref if found. | |
565 | * | |
566 | * called under mdsc->mutex. | |
567 | */ | |
568 | static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, | |
569 | u64 tid) | |
570 | { | |
571 | struct ceph_mds_request *req; | |
44ca18f2 SW |
572 | struct rb_node *n = mdsc->request_tree.rb_node; |
573 | ||
574 | while (n) { | |
575 | req = rb_entry(n, struct ceph_mds_request, r_node); | |
576 | if (tid < req->r_tid) | |
577 | n = n->rb_left; | |
578 | else if (tid > req->r_tid) | |
579 | n = n->rb_right; | |
580 | else { | |
581 | ceph_mdsc_get_request(req); | |
582 | return req; | |
583 | } | |
584 | } | |
585 | return NULL; | |
586 | } | |
587 | ||
588 | static void __insert_request(struct ceph_mds_client *mdsc, | |
589 | struct ceph_mds_request *new) | |
590 | { | |
591 | struct rb_node **p = &mdsc->request_tree.rb_node; | |
592 | struct rb_node *parent = NULL; | |
593 | struct ceph_mds_request *req = NULL; | |
594 | ||
595 | while (*p) { | |
596 | parent = *p; | |
597 | req = rb_entry(parent, struct ceph_mds_request, r_node); | |
598 | if (new->r_tid < req->r_tid) | |
599 | p = &(*p)->rb_left; | |
600 | else if (new->r_tid > req->r_tid) | |
601 | p = &(*p)->rb_right; | |
602 | else | |
603 | BUG(); | |
604 | } | |
605 | ||
606 | rb_link_node(&new->r_node, parent, p); | |
607 | rb_insert_color(&new->r_node, &mdsc->request_tree); | |
2f2dc053 SW |
608 | } |
609 | ||
610 | /* | |
611 | * Register an in-flight request, and assign a tid. Link to the | |
612 | * directory we are modifying (if any). | |
613 | * | |
614 | * Called under mdsc->mutex. | |
615 | */ | |
616 | static void __register_request(struct ceph_mds_client *mdsc, | |
617 | struct ceph_mds_request *req, | |
618 | struct inode *dir) | |
619 | { | |
620 | req->r_tid = ++mdsc->last_tid; | |
621 | if (req->r_num_caps) | |
37151668 YS |
622 | ceph_reserve_caps(mdsc, &req->r_caps_reservation, |
623 | req->r_num_caps); | |
2f2dc053 SW |
624 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
625 | ceph_mdsc_get_request(req); | |
44ca18f2 | 626 | __insert_request(mdsc, req); |
2f2dc053 | 627 | |
cb4276cc SW |
628 | req->r_uid = current_fsuid(); |
629 | req->r_gid = current_fsgid(); | |
630 | ||
e8a7b8b1 YZ |
631 | if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) |
632 | mdsc->oldest_tid = req->r_tid; | |
633 | ||
2f2dc053 SW |
634 | if (dir) { |
635 | struct ceph_inode_info *ci = ceph_inode(dir); | |
636 | ||
3b663780 | 637 | ihold(dir); |
2f2dc053 SW |
638 | spin_lock(&ci->i_unsafe_lock); |
639 | req->r_unsafe_dir = dir; | |
640 | list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); | |
641 | spin_unlock(&ci->i_unsafe_lock); | |
642 | } | |
643 | } | |
644 | ||
645 | static void __unregister_request(struct ceph_mds_client *mdsc, | |
646 | struct ceph_mds_request *req) | |
647 | { | |
648 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); | |
e8a7b8b1 YZ |
649 | |
650 | if (req->r_tid == mdsc->oldest_tid) { | |
651 | struct rb_node *p = rb_next(&req->r_node); | |
652 | mdsc->oldest_tid = 0; | |
653 | while (p) { | |
654 | struct ceph_mds_request *next_req = | |
655 | rb_entry(p, struct ceph_mds_request, r_node); | |
656 | if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { | |
657 | mdsc->oldest_tid = next_req->r_tid; | |
658 | break; | |
659 | } | |
660 | p = rb_next(p); | |
661 | } | |
662 | } | |
663 | ||
44ca18f2 | 664 | rb_erase(&req->r_node, &mdsc->request_tree); |
80fc7314 | 665 | RB_CLEAR_NODE(&req->r_node); |
2f2dc053 SW |
666 | |
667 | if (req->r_unsafe_dir) { | |
668 | struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); | |
669 | ||
670 | spin_lock(&ci->i_unsafe_lock); | |
671 | list_del_init(&req->r_unsafe_dir_item); | |
672 | spin_unlock(&ci->i_unsafe_lock); | |
3b663780 SW |
673 | |
674 | iput(req->r_unsafe_dir); | |
675 | req->r_unsafe_dir = NULL; | |
2f2dc053 | 676 | } |
94aa8ae1 | 677 | |
fc55d2c9 YZ |
678 | complete_all(&req->r_safe_completion); |
679 | ||
94aa8ae1 | 680 | ceph_mdsc_put_request(req); |
2f2dc053 SW |
681 | } |
682 | ||
683 | /* | |
684 | * Choose mds to send request to next. If there is a hint set in the | |
685 | * request (e.g., due to a prior forward hint from the mds), use that. | |
686 | * Otherwise, consult frag tree and/or caps to identify the | |
687 | * appropriate mds. If all else fails, choose randomly. | |
688 | * | |
689 | * Called under mdsc->mutex. | |
690 | */ | |
7fd7d101 | 691 | static struct dentry *get_nonsnap_parent(struct dentry *dentry) |
eb6bb1c5 | 692 | { |
d79698da SW |
693 | /* |
694 | * we don't need to worry about protecting the d_parent access | |
695 | * here because we never rename inside the snapped namespace | |
696 | * except to resplice to another snapdir, and either the old or new | |
697 | * result is a valid result. | |
698 | */ | |
2b0143b5 | 699 | while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) |
eb6bb1c5 SW |
700 | dentry = dentry->d_parent; |
701 | return dentry; | |
702 | } | |
703 | ||
2f2dc053 SW |
704 | static int __choose_mds(struct ceph_mds_client *mdsc, |
705 | struct ceph_mds_request *req) | |
706 | { | |
707 | struct inode *inode; | |
708 | struct ceph_inode_info *ci; | |
709 | struct ceph_cap *cap; | |
710 | int mode = req->r_direct_mode; | |
711 | int mds = -1; | |
712 | u32 hash = req->r_direct_hash; | |
713 | bool is_hash = req->r_direct_is_hash; | |
714 | ||
715 | /* | |
716 | * is there a specific mds we should try? ignore hint if we have | |
717 | * no session and the mds is not up (active or recovering). | |
718 | */ | |
719 | if (req->r_resend_mds >= 0 && | |
720 | (__have_session(mdsc, req->r_resend_mds) || | |
721 | ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { | |
722 | dout("choose_mds using resend_mds mds%d\n", | |
723 | req->r_resend_mds); | |
724 | return req->r_resend_mds; | |
725 | } | |
726 | ||
727 | if (mode == USE_RANDOM_MDS) | |
728 | goto random; | |
729 | ||
730 | inode = NULL; | |
731 | if (req->r_inode) { | |
732 | inode = req->r_inode; | |
733 | } else if (req->r_dentry) { | |
d79698da SW |
734 | /* ignore race with rename; old or new d_parent is okay */ |
735 | struct dentry *parent = req->r_dentry->d_parent; | |
2b0143b5 | 736 | struct inode *dir = d_inode(parent); |
eb6bb1c5 | 737 | |
3d14c5d2 | 738 | if (dir->i_sb != mdsc->fsc->sb) { |
eb6bb1c5 | 739 | /* not this fs! */ |
2b0143b5 | 740 | inode = d_inode(req->r_dentry); |
eb6bb1c5 SW |
741 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { |
742 | /* direct snapped/virtual snapdir requests | |
743 | * based on parent dir inode */ | |
d79698da | 744 | struct dentry *dn = get_nonsnap_parent(parent); |
2b0143b5 | 745 | inode = d_inode(dn); |
eb6bb1c5 | 746 | dout("__choose_mds using nonsnap parent %p\n", inode); |
ca18bede | 747 | } else { |
eb6bb1c5 | 748 | /* dentry target */ |
2b0143b5 | 749 | inode = d_inode(req->r_dentry); |
ca18bede YZ |
750 | if (!inode || mode == USE_AUTH_MDS) { |
751 | /* dir + name */ | |
752 | inode = dir; | |
753 | hash = ceph_dentry_hash(dir, req->r_dentry); | |
754 | is_hash = true; | |
755 | } | |
2f2dc053 SW |
756 | } |
757 | } | |
eb6bb1c5 | 758 | |
2f2dc053 SW |
759 | dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, |
760 | (int)hash, mode); | |
761 | if (!inode) | |
762 | goto random; | |
763 | ci = ceph_inode(inode); | |
764 | ||
765 | if (is_hash && S_ISDIR(inode->i_mode)) { | |
766 | struct ceph_inode_frag frag; | |
767 | int found; | |
768 | ||
769 | ceph_choose_frag(ci, hash, &frag, &found); | |
770 | if (found) { | |
771 | if (mode == USE_ANY_MDS && frag.ndist > 0) { | |
772 | u8 r; | |
773 | ||
774 | /* choose a random replica */ | |
775 | get_random_bytes(&r, 1); | |
776 | r %= frag.ndist; | |
777 | mds = frag.dist[r]; | |
778 | dout("choose_mds %p %llx.%llx " | |
779 | "frag %u mds%d (%d/%d)\n", | |
780 | inode, ceph_vinop(inode), | |
d66bbd44 | 781 | frag.frag, mds, |
2f2dc053 | 782 | (int)r, frag.ndist); |
d66bbd44 SW |
783 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
784 | CEPH_MDS_STATE_ACTIVE) | |
785 | return mds; | |
2f2dc053 SW |
786 | } |
787 | ||
788 | /* since this file/dir wasn't known to be | |
789 | * replicated, then we want to look for the | |
790 | * authoritative mds. */ | |
791 | mode = USE_AUTH_MDS; | |
792 | if (frag.mds >= 0) { | |
793 | /* choose auth mds */ | |
794 | mds = frag.mds; | |
795 | dout("choose_mds %p %llx.%llx " | |
796 | "frag %u mds%d (auth)\n", | |
797 | inode, ceph_vinop(inode), frag.frag, mds); | |
d66bbd44 SW |
798 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
799 | CEPH_MDS_STATE_ACTIVE) | |
800 | return mds; | |
2f2dc053 SW |
801 | } |
802 | } | |
803 | } | |
804 | ||
be655596 | 805 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
806 | cap = NULL; |
807 | if (mode == USE_AUTH_MDS) | |
808 | cap = ci->i_auth_cap; | |
809 | if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) | |
810 | cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); | |
811 | if (!cap) { | |
be655596 | 812 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
813 | goto random; |
814 | } | |
815 | mds = cap->session->s_mds; | |
816 | dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", | |
817 | inode, ceph_vinop(inode), mds, | |
818 | cap == ci->i_auth_cap ? "auth " : "", cap); | |
be655596 | 819 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
820 | return mds; |
821 | ||
822 | random: | |
823 | mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); | |
824 | dout("choose_mds chose random mds%d\n", mds); | |
825 | return mds; | |
826 | } | |
827 | ||
828 | ||
829 | /* | |
830 | * session messages | |
831 | */ | |
832 | static struct ceph_msg *create_session_msg(u32 op, u64 seq) | |
833 | { | |
834 | struct ceph_msg *msg; | |
835 | struct ceph_mds_session_head *h; | |
836 | ||
b61c2763 SW |
837 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, |
838 | false); | |
a79832f2 | 839 | if (!msg) { |
2f2dc053 | 840 | pr_err("create_session_msg ENOMEM creating msg\n"); |
a79832f2 | 841 | return NULL; |
2f2dc053 SW |
842 | } |
843 | h = msg->front.iov_base; | |
844 | h->op = cpu_to_le32(op); | |
845 | h->seq = cpu_to_le64(seq); | |
dbd0c8bf JS |
846 | |
847 | return msg; | |
848 | } | |
849 | ||
850 | /* | |
851 | * session message, specialization for CEPH_SESSION_REQUEST_OPEN | |
852 | * to include additional client metadata fields. | |
853 | */ | |
854 | static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) | |
855 | { | |
856 | struct ceph_msg *msg; | |
857 | struct ceph_mds_session_head *h; | |
858 | int i = -1; | |
859 | int metadata_bytes = 0; | |
860 | int metadata_key_count = 0; | |
861 | struct ceph_options *opt = mdsc->fsc->client->options; | |
862 | void *p; | |
863 | ||
a6a5ce4f | 864 | const char* metadata[][2] = { |
dbd0c8bf | 865 | {"hostname", utsname()->nodename}, |
a6a5ce4f | 866 | {"kernel_version", utsname()->release}, |
dbd0c8bf JS |
867 | {"entity_id", opt->name ? opt->name : ""}, |
868 | {NULL, NULL} | |
869 | }; | |
870 | ||
871 | /* Calculate serialized length of metadata */ | |
872 | metadata_bytes = 4; /* map length */ | |
873 | for (i = 0; metadata[i][0] != NULL; ++i) { | |
874 | metadata_bytes += 8 + strlen(metadata[i][0]) + | |
875 | strlen(metadata[i][1]); | |
876 | metadata_key_count++; | |
877 | } | |
878 | ||
879 | /* Allocate the message */ | |
880 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes, | |
881 | GFP_NOFS, false); | |
882 | if (!msg) { | |
883 | pr_err("create_session_msg ENOMEM creating msg\n"); | |
884 | return NULL; | |
885 | } | |
886 | h = msg->front.iov_base; | |
887 | h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN); | |
888 | h->seq = cpu_to_le64(seq); | |
889 | ||
890 | /* | |
891 | * Serialize client metadata into waiting buffer space, using | |
892 | * the format that userspace expects for map<string, string> | |
7cfa0313 JS |
893 | * |
894 | * ClientSession messages with metadata are v2 | |
dbd0c8bf | 895 | */ |
7cfa0313 JS |
896 | msg->hdr.version = cpu_to_le16(2); |
897 | msg->hdr.compat_version = cpu_to_le16(1); | |
dbd0c8bf JS |
898 | |
899 | /* The write pointer, following the session_head structure */ | |
900 | p = msg->front.iov_base + sizeof(*h); | |
901 | ||
902 | /* Number of entries in the map */ | |
903 | ceph_encode_32(&p, metadata_key_count); | |
904 | ||
905 | /* Two length-prefixed strings for each entry in the map */ | |
906 | for (i = 0; metadata[i][0] != NULL; ++i) { | |
907 | size_t const key_len = strlen(metadata[i][0]); | |
908 | size_t const val_len = strlen(metadata[i][1]); | |
909 | ||
910 | ceph_encode_32(&p, key_len); | |
911 | memcpy(p, metadata[i][0], key_len); | |
912 | p += key_len; | |
913 | ceph_encode_32(&p, val_len); | |
914 | memcpy(p, metadata[i][1], val_len); | |
915 | p += val_len; | |
916 | } | |
917 | ||
2f2dc053 SW |
918 | return msg; |
919 | } | |
920 | ||
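The metadata block built above is the standard Ceph encoding of a `map<string, string>`: a `u32` entry count followed, for each entry, by a length-prefixed key and a length-prefixed value. A self-contained sketch of that wire format (byte-order conversion omitted; the kernel's `ceph_encode_32()` emits little-endian):

```c
#include <stdint.h>
#include <string.h>

static size_t encode_string(char *out, const char *s)
{
	uint32_t len = (uint32_t)strlen(s);

	memcpy(out, &len, sizeof(len));		/* u32 length prefix */
	memcpy(out + sizeof(len), s, len);	/* bytes, no NUL */
	return sizeof(len) + len;
}

static size_t encode_string_map(char *out, const char *kv[][2], uint32_t n)
{
	size_t off = sizeof(uint32_t);
	uint32_t i;

	memcpy(out, &n, sizeof(n));		/* number of map entries */
	for (i = 0; i < n; i++) {
		off += encode_string(out + off, kv[i][0]);	/* key */
		off += encode_string(out + off, kv[i][1]);	/* value */
	}
	return off;
}
/*
 * e.g.: const char *md[][2] = { {"hostname", "client1"},
 *                               {"entity_id", ""} };
 * encode_string_map(buf, md, 2);
 */
```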
921 | /* | |
922 | * send session open request. | |
923 | * | |
924 | * called under mdsc->mutex | |
925 | */ | |
926 | static int __open_session(struct ceph_mds_client *mdsc, | |
927 | struct ceph_mds_session *session) | |
928 | { | |
929 | struct ceph_msg *msg; | |
930 | int mstate; | |
931 | int mds = session->s_mds; | |
2f2dc053 SW |
932 | |
933 | /* wait for mds to go active? */ | |
934 | mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); | |
935 | dout("open_session to mds%d (%s)\n", mds, | |
936 | ceph_mds_state_name(mstate)); | |
937 | session->s_state = CEPH_MDS_SESSION_OPENING; | |
938 | session->s_renew_requested = jiffies; | |
939 | ||
940 | /* send connect message */ | |
dbd0c8bf | 941 | msg = create_session_open_msg(mdsc, session->s_seq); |
a79832f2 SW |
942 | if (!msg) |
943 | return -ENOMEM; | |
2f2dc053 | 944 | ceph_con_send(&session->s_con, msg); |
2f2dc053 SW |
945 | return 0; |
946 | } | |
947 | ||
ed0552a1 SW |
948 | /* |
949 | * open sessions for any export targets for the given mds | |
950 | * | |
951 | * called under mdsc->mutex | |
952 | */ | |
5d72d13c YZ |
953 | static struct ceph_mds_session * |
954 | __open_export_target_session(struct ceph_mds_client *mdsc, int target) | |
955 | { | |
956 | struct ceph_mds_session *session; | |
957 | ||
958 | session = __ceph_lookup_mds_session(mdsc, target); | |
959 | if (!session) { | |
960 | session = register_session(mdsc, target); | |
961 | if (IS_ERR(session)) | |
962 | return session; | |
963 | } | |
964 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
965 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
966 | __open_session(mdsc, session); | |
967 | ||
968 | return session; | |
969 | } | |
970 | ||
971 | struct ceph_mds_session * | |
972 | ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) | |
973 | { | |
974 | struct ceph_mds_session *session; | |
975 | ||
976 | dout("open_export_target_session to mds%d\n", target); | |
977 | ||
978 | mutex_lock(&mdsc->mutex); | |
979 | session = __open_export_target_session(mdsc, target); | |
980 | mutex_unlock(&mdsc->mutex); | |
981 | ||
982 | return session; | |
983 | } | |
984 | ||
ed0552a1 SW |
985 | static void __open_export_target_sessions(struct ceph_mds_client *mdsc, |
986 | struct ceph_mds_session *session) | |
987 | { | |
988 | struct ceph_mds_info *mi; | |
989 | struct ceph_mds_session *ts; | |
990 | int i, mds = session->s_mds; | |
ed0552a1 SW |
991 | |
992 | if (mds >= mdsc->mdsmap->m_max_mds) | |
993 | return; | |
5d72d13c | 994 | |
ed0552a1 SW |
995 | mi = &mdsc->mdsmap->m_info[mds]; |
996 | dout("open_export_target_sessions for mds%d (%d targets)\n", | |
997 | session->s_mds, mi->num_export_targets); | |
998 | ||
999 | for (i = 0; i < mi->num_export_targets; i++) { | |
5d72d13c YZ |
1000 | ts = __open_export_target_session(mdsc, mi->export_targets[i]); |
1001 | if (!IS_ERR(ts)) | |
1002 | ceph_put_mds_session(ts); | |
ed0552a1 SW |
1003 | } |
1004 | } | |
1005 | ||
154f42c2 SW |
1006 | void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, |
1007 | struct ceph_mds_session *session) | |
1008 | { | |
1009 | mutex_lock(&mdsc->mutex); | |
1010 | __open_export_target_sessions(mdsc, session); | |
1011 | mutex_unlock(&mdsc->mutex); | |
1012 | } | |
1013 | ||
2f2dc053 SW |
1014 | /* |
1015 | * session caps | |
1016 | */ | |
1017 | ||
745a8e3b YZ |
1018 | /* caller holds s_cap_lock, we drop it */ |
1019 | static void cleanup_cap_releases(struct ceph_mds_client *mdsc, | |
1020 | struct ceph_mds_session *session) | |
1021 | __releases(session->s_cap_lock) | |
2f2dc053 | 1022 | { |
745a8e3b YZ |
1023 | LIST_HEAD(tmp_list); |
1024 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1025 | session->s_num_cap_releases = 0; | |
1026 | spin_unlock(&session->s_cap_lock); | |
2f2dc053 | 1027 | |
745a8e3b YZ |
1028 | dout("cleanup_cap_releases mds%d\n", session->s_mds); |
1029 | while (!list_empty(&tmp_list)) { | |
1030 | struct ceph_cap *cap; | |
1031 | /* zero out the in-progress message */ | |
1032 | cap = list_first_entry(&tmp_list, | |
1033 | struct ceph_cap, session_caps); | |
1034 | list_del(&cap->session_caps); | |
1035 | ceph_put_cap(mdsc, cap); | |
2f2dc053 | 1036 | } |
2f2dc053 SW |
1037 | } |
1038 | ||
1c841a96 YZ |
1039 | static void cleanup_session_requests(struct ceph_mds_client *mdsc, |
1040 | struct ceph_mds_session *session) | |
1041 | { | |
1042 | struct ceph_mds_request *req; | |
1043 | struct rb_node *p; | |
1044 | ||
1045 | dout("cleanup_session_requests mds%d\n", session->s_mds); | |
1046 | mutex_lock(&mdsc->mutex); | |
1047 | while (!list_empty(&session->s_unsafe)) { | |
1048 | req = list_first_entry(&session->s_unsafe, | |
1049 | struct ceph_mds_request, r_unsafe_item); | |
1050 | list_del_init(&req->r_unsafe_item); | |
1051 | pr_info(" dropping unsafe request %llu\n", req->r_tid); | |
1052 | __unregister_request(mdsc, req); | |
1053 | } | |
1054 | /* zero r_attempts, so kick_requests() will re-send requests */ | |
1055 | p = rb_first(&mdsc->request_tree); | |
1056 | while (p) { | |
1057 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
1058 | p = rb_next(p); | |
1059 | if (req->r_session && | |
1060 | req->r_session->s_mds == session->s_mds) | |
1061 | req->r_attempts = 0; | |
1062 | } | |
1063 | mutex_unlock(&mdsc->mutex); | |
1064 | } | |
1065 | ||
2f2dc053 | 1066 | /* |
f818a736 SW |
1067 | * Helper to safely iterate over all caps associated with a session, with |
1068 | * special care taken to handle a racing __ceph_remove_cap(). | |
2f2dc053 | 1069 | * |
f818a736 | 1070 | * Caller must hold session s_mutex. |
2f2dc053 SW |
1071 | */ |
1072 | static int iterate_session_caps(struct ceph_mds_session *session, | |
1073 | int (*cb)(struct inode *, struct ceph_cap *, | |
1074 | void *), void *arg) | |
1075 | { | |
7c1332b8 SW |
1076 | struct list_head *p; |
1077 | struct ceph_cap *cap; | |
1078 | struct inode *inode, *last_inode = NULL; | |
1079 | struct ceph_cap *old_cap = NULL; | |
2f2dc053 SW |
1080 | int ret; |
1081 | ||
1082 | dout("iterate_session_caps %p mds%d\n", session, session->s_mds); | |
1083 | spin_lock(&session->s_cap_lock); | |
7c1332b8 SW |
1084 | p = session->s_caps.next; |
1085 | while (p != &session->s_caps) { | |
1086 | cap = list_entry(p, struct ceph_cap, session_caps); | |
2f2dc053 | 1087 | inode = igrab(&cap->ci->vfs_inode); |
7c1332b8 SW |
1088 | if (!inode) { |
1089 | p = p->next; | |
2f2dc053 | 1090 | continue; |
7c1332b8 SW |
1091 | } |
1092 | session->s_cap_iterator = cap; | |
2f2dc053 | 1093 | spin_unlock(&session->s_cap_lock); |
7c1332b8 SW |
1094 | |
1095 | if (last_inode) { | |
1096 | iput(last_inode); | |
1097 | last_inode = NULL; | |
1098 | } | |
1099 | if (old_cap) { | |
37151668 | 1100 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 SW |
1101 | old_cap = NULL; |
1102 | } | |
1103 | ||
2f2dc053 | 1104 | ret = cb(inode, cap, arg); |
7c1332b8 SW |
1105 | last_inode = inode; |
1106 | ||
2f2dc053 | 1107 | spin_lock(&session->s_cap_lock); |
7c1332b8 SW |
1108 | p = p->next; |
1109 | if (cap->ci == NULL) { | |
1110 | dout("iterate_session_caps finishing cap %p removal\n", | |
1111 | cap); | |
1112 | BUG_ON(cap->session != session); | |
745a8e3b | 1113 | cap->session = NULL; |
7c1332b8 SW |
1114 | list_del_init(&cap->session_caps); |
1115 | session->s_nr_caps--; | |
745a8e3b YZ |
1116 | if (cap->queue_release) { |
1117 | list_add_tail(&cap->session_caps, | |
1118 | &session->s_cap_releases); | |
1119 | session->s_num_cap_releases++; | |
1120 | } else { | |
1121 | old_cap = cap; /* put_cap it w/o locks held */ | |
1122 | } | |
7c1332b8 | 1123 | } |
5dacf091 SW |
1124 | if (ret < 0) |
1125 | goto out; | |
2f2dc053 | 1126 | } |
5dacf091 SW |
1127 | ret = 0; |
1128 | out: | |
7c1332b8 | 1129 | session->s_cap_iterator = NULL; |
2f2dc053 | 1130 | spin_unlock(&session->s_cap_lock); |
7c1332b8 | 1131 | |
e96a650a | 1132 | iput(last_inode); |
7c1332b8 | 1133 | if (old_cap) |
37151668 | 1134 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 | 1135 | |
5dacf091 | 1136 | return ret; |
2f2dc053 SW |
1137 | } |
1138 | ||
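The subtle part of `iterate_session_caps()` is that the callback may sleep, so `s_cap_lock` cannot be held across it: the current cap's inode is pinned with `igrab()`, and the *previous* pin is released only after the lock has been dropped again, since `iput()` and `ceph_put_cap()` may themselves sleep. A userspace pthreads sketch of that pin-and-defer idiom follows; unlike the real code it assumes a pinned node is never unlinked, whereas the kernel publishes `s_cap_iterator` so that a racing `__ceph_remove_cap()` defers the unlink.

```c
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; int refs; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_put(struct node *n)	/* called without list_lock held */
{
	int now;

	pthread_mutex_lock(&list_lock);
	now = --n->refs;
	pthread_mutex_unlock(&list_lock);
	if (now == 0)
		free(n);	/* heavyweight release, no lock held */
}

static void iterate(struct node *head, void (*cb)(struct node *))
{
	struct node *n, *last = NULL;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		n->refs++;	/* pin the current element */
		pthread_mutex_unlock(&list_lock);

		if (last)
			node_put(last);	/* drop previous pin, lock dropped */
		cb(n);			/* may sleep; list_lock not held */

		pthread_mutex_lock(&list_lock);
		last = n;
	}
	pthread_mutex_unlock(&list_lock);
	if (last)
		node_put(last);		/* final deferred put */
}
```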
1139 | static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | |
6c99f254 | 1140 | void *arg) |
2f2dc053 SW |
1141 | { |
1142 | struct ceph_inode_info *ci = ceph_inode(inode); | |
6c99f254 SW |
1143 | int drop = 0; |
1144 | ||
2f2dc053 SW |
1145 | dout("removing cap %p, ci is %p, inode is %p\n", |
1146 | cap, ci, &ci->vfs_inode); | |
be655596 | 1147 | spin_lock(&ci->i_ceph_lock); |
a096b09a | 1148 | __ceph_remove_cap(cap, false); |
571ade33 | 1149 | if (!ci->i_auth_cap) { |
6c99f254 | 1150 | struct ceph_mds_client *mdsc = |
3d14c5d2 | 1151 | ceph_sb_to_client(inode->i_sb)->mdsc; |
6c99f254 SW |
1152 | |
1153 | spin_lock(&mdsc->cap_dirty_lock); | |
1154 | if (!list_empty(&ci->i_dirty_item)) { | |
1155 | pr_info(" dropping dirty %s state for %p %lld\n", | |
1156 | ceph_cap_string(ci->i_dirty_caps), | |
1157 | inode, ceph_ino(inode)); | |
1158 | ci->i_dirty_caps = 0; | |
1159 | list_del_init(&ci->i_dirty_item); | |
1160 | drop = 1; | |
1161 | } | |
1162 | if (!list_empty(&ci->i_flushing_item)) { | |
1163 | pr_info(" dropping dirty+flushing %s state for %p %lld\n", | |
1164 | ceph_cap_string(ci->i_flushing_caps), | |
1165 | inode, ceph_ino(inode)); | |
1166 | ci->i_flushing_caps = 0; | |
1167 | list_del_init(&ci->i_flushing_item); | |
1168 | mdsc->num_cap_flushing--; | |
1169 | drop = 1; | |
1170 | } | |
6c99f254 SW |
1171 | spin_unlock(&mdsc->cap_dirty_lock); |
1172 | } | |
be655596 | 1173 | spin_unlock(&ci->i_ceph_lock); |
6c99f254 SW |
1174 | while (drop--) |
1175 | iput(inode); | |
2f2dc053 SW |
1176 | return 0; |
1177 | } | |
1178 | ||
1179 | /* | |
1180 | * caller must hold session s_mutex | |
1181 | */ | |
1182 | static void remove_session_caps(struct ceph_mds_session *session) | |
1183 | { | |
1184 | dout("remove_session_caps on %p\n", session); | |
1185 | iterate_session_caps(session, remove_session_caps_cb, NULL); | |
6f60f889 YZ |
1186 | |
1187 | spin_lock(&session->s_cap_lock); | |
1188 | if (session->s_nr_caps > 0) { | |
1189 | struct super_block *sb = session->s_mdsc->fsc->sb; | |
1190 | struct inode *inode; | |
1191 | struct ceph_cap *cap, *prev = NULL; | |
1192 | struct ceph_vino vino; | |
1193 | /* | |
1194 | * iterate_session_caps() skips inodes that are being | |
1195 | * deleted, we need to wait until deletions are complete. | |
1196 | * __wait_on_freeing_inode() is designed for the job, | |
1197 | * but it is not exported, so use lookup inode function | |
1198 | * to access it. | |
1199 | */ | |
1200 | while (!list_empty(&session->s_caps)) { | |
1201 | cap = list_entry(session->s_caps.next, | |
1202 | struct ceph_cap, session_caps); | |
1203 | if (cap == prev) | |
1204 | break; | |
1205 | prev = cap; | |
1206 | vino = cap->ci->i_vino; | |
1207 | spin_unlock(&session->s_cap_lock); | |
1208 | ||
ed284c49 | 1209 | inode = ceph_find_inode(sb, vino); |
6f60f889 YZ |
1210 | iput(inode); |
1211 | ||
1212 | spin_lock(&session->s_cap_lock); | |
1213 | } | |
1214 | } | |
745a8e3b YZ |
1215 | |
1216 | // drop cap expires and unlock s_cap_lock | |
1217 | cleanup_cap_releases(session->s_mdsc, session); | |
6f60f889 | 1218 | |
2f2dc053 | 1219 | BUG_ON(session->s_nr_caps > 0); |
6c99f254 | 1220 | BUG_ON(!list_empty(&session->s_cap_flushing)); |
2f2dc053 SW |
1221 | } |
1222 | ||
1223 | /* | |
1224 | * wake up any threads waiting on this session's caps. if the cap is | |
1225 | * old (didn't get renewed on the client reconnect), remove it now. | |
1226 | * | |
1227 | * caller must hold s_mutex. | |
1228 | */ | |
1229 | static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, | |
1230 | void *arg) | |
1231 | { | |
0dc2570f SW |
1232 | struct ceph_inode_info *ci = ceph_inode(inode); |
1233 | ||
03066f23 | 1234 | wake_up_all(&ci->i_cap_wq); |
0dc2570f | 1235 | if (arg) { |
be655596 | 1236 | spin_lock(&ci->i_ceph_lock); |
0dc2570f SW |
1237 | ci->i_wanted_max_size = 0; |
1238 | ci->i_requested_max_size = 0; | |
be655596 | 1239 | spin_unlock(&ci->i_ceph_lock); |
0dc2570f | 1240 | } |
2f2dc053 SW |
1241 | return 0; |
1242 | } | |
1243 | ||
0dc2570f SW |
1244 | static void wake_up_session_caps(struct ceph_mds_session *session, |
1245 | int reconnect) | |
2f2dc053 SW |
1246 | { |
1247 | dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); | |
0dc2570f SW |
1248 | iterate_session_caps(session, wake_up_session_cb, |
1249 | (void *)(unsigned long)reconnect); | |
2f2dc053 SW |
1250 | } |
1251 | ||
1252 | /* | |
1253 | * Send periodic message to MDS renewing all currently held caps. The | |
1254 | * ack will reset the expiration for all caps from this session. | |
1255 | * | |
1256 | * caller holds s_mutex | |
1257 | */ | |
1258 | static int send_renew_caps(struct ceph_mds_client *mdsc, | |
1259 | struct ceph_mds_session *session) | |
1260 | { | |
1261 | struct ceph_msg *msg; | |
1262 | int state; | |
1263 | ||
1264 | if (time_after_eq(jiffies, session->s_cap_ttl) && | |
1265 | time_after_eq(session->s_cap_ttl, session->s_renew_requested)) | |
1266 | pr_info("mds%d caps stale\n", session->s_mds); | |
e4cb4cb8 | 1267 | session->s_renew_requested = jiffies; |
2f2dc053 SW |
1268 | |
1269 | /* do not try to renew caps until a recovering mds has reconnected | |
1270 | * with its clients. */ | |
1271 | state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); | |
1272 | if (state < CEPH_MDS_STATE_RECONNECT) { | |
1273 | dout("send_renew_caps ignoring mds%d (%s)\n", | |
1274 | session->s_mds, ceph_mds_state_name(state)); | |
1275 | return 0; | |
1276 | } | |
1277 | ||
1278 | dout("send_renew_caps to mds%d (%s)\n", session->s_mds, | |
1279 | ceph_mds_state_name(state)); | |
2f2dc053 SW |
1280 | msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, |
1281 | ++session->s_renew_seq); | |
a79832f2 SW |
1282 | if (!msg) |
1283 | return -ENOMEM; | |
2f2dc053 SW |
1284 | ceph_con_send(&session->s_con, msg); |
1285 | return 0; | |
1286 | } | |
1287 | ||
186e4f7a YZ |
1288 | static int send_flushmsg_ack(struct ceph_mds_client *mdsc, |
1289 | struct ceph_mds_session *session, u64 seq) | |
1290 | { | |
1291 | struct ceph_msg *msg; | |
1292 | ||
1293 | dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", | |
a687ecaf | 1294 | session->s_mds, ceph_session_state_name(session->s_state), seq); |
186e4f7a YZ |
1295 | msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); |
1296 | if (!msg) | |
1297 | return -ENOMEM; | |
1298 | ceph_con_send(&session->s_con, msg); | |
1299 | return 0; | |
1300 | } | |
1301 | ||
1302 | ||
2f2dc053 SW |
1303 | /* |
1304 | * Note new cap ttl, and any transition from stale -> not stale (fresh?). | |
0dc2570f SW |
1305 | * |
1306 | * Called under session->s_mutex | |
2f2dc053 SW |
1307 | */ |
1308 | static void renewed_caps(struct ceph_mds_client *mdsc, | |
1309 | struct ceph_mds_session *session, int is_renew) | |
1310 | { | |
1311 | int was_stale; | |
1312 | int wake = 0; | |
1313 | ||
1314 | spin_lock(&session->s_cap_lock); | |
1ce208a6 | 1315 | was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl); |
2f2dc053 SW |
1316 | |
1317 | session->s_cap_ttl = session->s_renew_requested + | |
1318 | mdsc->mdsmap->m_session_timeout*HZ; | |
1319 | ||
1320 | if (was_stale) { | |
1321 | if (time_before(jiffies, session->s_cap_ttl)) { | |
1322 | pr_info("mds%d caps renewed\n", session->s_mds); | |
1323 | wake = 1; | |
1324 | } else { | |
1325 | pr_info("mds%d caps still stale\n", session->s_mds); | |
1326 | } | |
1327 | } | |
1328 | dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", | |
1329 | session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", | |
1330 | time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale"); | |
1331 | spin_unlock(&session->s_cap_lock); | |
1332 | ||
1333 | if (wake) | |
0dc2570f | 1334 | wake_up_session_caps(session, 0); |
2f2dc053 SW |
1335 | } |
1336 | ||
1337 | /* | |
1338 | * send a session close request | |
1339 | */ | |
1340 | static int request_close_session(struct ceph_mds_client *mdsc, | |
1341 | struct ceph_mds_session *session) | |
1342 | { | |
1343 | struct ceph_msg *msg; | |
2f2dc053 SW |
1344 | |
1345 | dout("request_close_session mds%d state %s seq %lld\n", | |
a687ecaf | 1346 | session->s_mds, ceph_session_state_name(session->s_state), |
2f2dc053 SW |
1347 | session->s_seq); |
1348 | msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); | |
a79832f2 SW |
1349 | if (!msg) |
1350 | return -ENOMEM; | |
1351 | ceph_con_send(&session->s_con, msg); | |
1352 | return 0; | |
2f2dc053 SW |
1353 | } |
1354 | ||
1355 | /* | |
1356 | * Called with s_mutex held. | |
1357 | */ | |
1358 | static int __close_session(struct ceph_mds_client *mdsc, | |
1359 | struct ceph_mds_session *session) | |
1360 | { | |
1361 | if (session->s_state >= CEPH_MDS_SESSION_CLOSING) | |
1362 | return 0; | |
1363 | session->s_state = CEPH_MDS_SESSION_CLOSING; | |
1364 | return request_close_session(mdsc, session); | |
1365 | } | |
1366 | ||
1367 | /* | |
1368 | * Trim old(er) caps. | |
1369 | * | |
1370 | * Because we can't cache an inode without one or more caps, we do | |
1371 | * this indirectly: if a cap is unused, we prune its aliases, at which | |
1372 | * point the inode will hopefully get dropped too. | |
1373 | * | |
1374 | * Yes, this is a bit sloppy. Our only real goal here is to respond to | |
1375 | * memory pressure from the MDS, though, so it needn't be perfect. | |
1376 | */ | |
1377 | static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) | |
1378 | { | |
1379 | struct ceph_mds_session *session = arg; | |
1380 | struct ceph_inode_info *ci = ceph_inode(inode); | |
979abfdd | 1381 | int used, wanted, oissued, mine; |
2f2dc053 SW |
1382 | |
1383 | if (session->s_trim_caps <= 0) | |
1384 | return -1; | |
1385 | ||
be655596 | 1386 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
1387 | mine = cap->issued | cap->implemented; |
1388 | used = __ceph_caps_used(ci); | |
979abfdd | 1389 | wanted = __ceph_caps_file_wanted(ci); |
2f2dc053 SW |
1390 | oissued = __ceph_caps_issued_other(ci, cap); |
1391 | ||
979abfdd | 1392 | dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n", |
2f2dc053 | 1393 | inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), |
979abfdd YZ |
1394 | ceph_cap_string(used), ceph_cap_string(wanted)); |
1395 | if (cap == ci->i_auth_cap) { | |
622f3e25 YZ |
1396 | if (ci->i_dirty_caps || ci->i_flushing_caps || |
1397 | !list_empty(&ci->i_cap_snaps)) | |
979abfdd YZ |
1398 | goto out; |
1399 | if ((used | wanted) & CEPH_CAP_ANY_WR) | |
1400 | goto out; | |
1401 | } | |
1402 | if ((used | wanted) & ~oissued & mine) | |
2f2dc053 SW |
1403 | goto out; /* we need these caps */ |
1404 | ||
1405 | session->s_trim_caps--; | |
1406 | if (oissued) { | |
1407 | /* we aren't the only cap.. just remove us */ | |
a096b09a | 1408 | __ceph_remove_cap(cap, true); |
2f2dc053 SW |
1409 | } else { |
1410 | /* try to drop referring dentries */ | |
be655596 | 1411 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1412 | d_prune_aliases(inode); |
1413 | dout("trim_caps_cb %p cap %p pruned, count now %d\n", | |
1414 | inode, cap, atomic_read(&inode->i_count)); | |
1415 | return 0; | |
1416 | } | |
1417 | ||
1418 | out: | |
be655596 | 1419 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1420 | return 0; |
1421 | } | |
1422 | ||
1423 | /* | |
1424 | * Trim session cap count down to some max number. | |
1425 | */ | |
1426 | static int trim_caps(struct ceph_mds_client *mdsc, | |
1427 | struct ceph_mds_session *session, | |
1428 | int max_caps) | |
1429 | { | |
1430 | int trim_caps = session->s_nr_caps - max_caps; | |
1431 | ||
1432 | dout("trim_caps mds%d start: %d / %d, trim %d\n", | |
1433 | session->s_mds, session->s_nr_caps, max_caps, trim_caps); | |
1434 | if (trim_caps > 0) { | |
1435 | session->s_trim_caps = trim_caps; | |
1436 | iterate_session_caps(session, trim_caps_cb, session); | |
1437 | dout("trim_caps mds%d done: %d / %d, trimmed %d\n", | |
1438 | session->s_mds, session->s_nr_caps, max_caps, | |
1439 | trim_caps - session->s_trim_caps); | |
5dacf091 | 1440 | session->s_trim_caps = 0; |
2f2dc053 | 1441 | } |
a56371d9 | 1442 | |
a56371d9 | 1443 | ceph_send_cap_releases(mdsc, session); |
2f2dc053 SW |
1444 | return 0; |
1445 | } | |
1446 | ||
affbc19a YZ |
1447 | static int check_cap_flush(struct ceph_inode_info *ci, |
1448 | u64 want_flush_seq, u64 want_snap_seq) | |
d3383a8e | 1449 | { |
affbc19a | 1450 | int ret1 = 1, ret2 = 1; |
d3383a8e | 1451 | spin_lock(&ci->i_ceph_lock); |
affbc19a YZ |
1452 | if (want_flush_seq > 0 && ci->i_flushing_caps) |
1453 | ret1 = ci->i_cap_flush_seq >= want_flush_seq; | |
1454 | ||
1455 | if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) { | |
1456 | struct ceph_cap_snap *capsnap = | |
1457 | list_first_entry(&ci->i_cap_snaps, | |
1458 | struct ceph_cap_snap, ci_item); | |
1459 | ret2 = capsnap->follows >= want_snap_seq; | |
1460 | } | |
d3383a8e | 1461 | spin_unlock(&ci->i_ceph_lock); |
affbc19a | 1462 | return ret1 && ret2; |
d3383a8e YZ |
1463 | } |
1464 | ||
2f2dc053 SW |
1465 | /* |
1466 | * flush all dirty inode data to disk. | |
1467 | * | |
1468 | * waits until we've flushed through want_flush_seq | |
1469 | */ | |
affbc19a YZ |
1470 | static void wait_caps_flush(struct ceph_mds_client *mdsc, |
1471 | u64 want_flush_seq, u64 want_snap_seq) | |
2f2dc053 | 1472 | { |
d3383a8e | 1473 | int mds; |
2f2dc053 SW |
1474 | |
1475 | dout("check_cap_flush want %lld\n", want_flush_seq); | |
1476 | mutex_lock(&mdsc->mutex); | |
affbc19a | 1477 | for (mds = 0; mds < mdsc->max_sessions; ) { |
2f2dc053 | 1478 | struct ceph_mds_session *session = mdsc->sessions[mds]; |
affbc19a | 1479 | struct inode *inode1 = NULL, *inode2 = NULL; |
2f2dc053 | 1480 | |
affbc19a YZ |
1481 | if (!session) { |
1482 | mds++; | |
2f2dc053 | 1483 | continue; |
affbc19a | 1484 | } |
2f2dc053 SW |
1485 | get_session(session); |
1486 | mutex_unlock(&mdsc->mutex); | |
1487 | ||
1488 | mutex_lock(&session->s_mutex); | |
1489 | if (!list_empty(&session->s_cap_flushing)) { | |
1490 | struct ceph_inode_info *ci = | |
affbc19a YZ |
1491 | list_first_entry(&session->s_cap_flushing, |
1492 | struct ceph_inode_info, | |
1493 | i_flushing_item); | |
2f2dc053 | 1494 | |
affbc19a | 1495 | if (!check_cap_flush(ci, want_flush_seq, 0)) { |
2f2dc053 | 1496 | dout("check_cap_flush still flushing %p " |
d3383a8e YZ |
1497 | "seq %lld <= %lld to mds%d\n", |
1498 | &ci->vfs_inode, ci->i_cap_flush_seq, | |
affbc19a YZ |
1499 | want_flush_seq, mds); |
1500 | inode1 = igrab(&ci->vfs_inode); | |
1501 | } | |
1502 | } | |
1503 | if (!list_empty(&session->s_cap_snaps_flushing)) { | |
1504 | struct ceph_cap_snap *capsnap = | |
1505 | list_first_entry(&session->s_cap_snaps_flushing, | |
1506 | struct ceph_cap_snap, | |
1507 | flushing_item); | |
1508 | struct ceph_inode_info *ci = capsnap->ci; | |
1509 | if (!check_cap_flush(ci, 0, want_snap_seq)) { | |
1510 | dout("check_cap_flush still flushing snap %p " | |
1511 | "follows %lld <= %lld to mds%d\n", | |
1512 | &ci->vfs_inode, capsnap->follows, | |
1513 | want_snap_seq, mds); | |
1514 | inode2 = igrab(&ci->vfs_inode); | |
2f2dc053 | 1515 | } |
2f2dc053 SW |
1516 | } |
1517 | mutex_unlock(&session->s_mutex); | |
1518 | ceph_put_mds_session(session); | |
1519 | ||
affbc19a | 1520 | if (inode1) { |
d3383a8e | 1521 | wait_event(mdsc->cap_flushing_wq, |
affbc19a YZ |
1522 | check_cap_flush(ceph_inode(inode1), |
1523 | want_flush_seq, 0)); | |
1524 | iput(inode1); | |
1525 | } | |
1526 | if (inode2) { | |
1527 | wait_event(mdsc->cap_flushing_wq, | |
1528 | check_cap_flush(ceph_inode(inode2), | |
1529 | 0, want_snap_seq)); | |
1530 | iput(inode2); | |
d3383a8e YZ |
1531 | } |
1532 | ||
affbc19a YZ |
1533 | if (!inode1 && !inode2) |
1534 | mds++; | |
1535 | ||
2f2dc053 SW |
1536 | mutex_lock(&mdsc->mutex); |
1537 | } | |
1538 | ||
1539 | mutex_unlock(&mdsc->mutex); | |
1540 | dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); | |
2f2dc053 SW |
1541 | } |
1542 | ||
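/*
 * Editor's sketch of the intended use (an assumption, not from the
 * original source): the sync/umount path samples the current flush and
 * snap sequence numbers and then blocks here until every session has
 * pushed its cap and cap-snap flushes past them, e.g.
 *
 *	wait_caps_flush(mdsc, want_flush, want_snap);
 */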
1543 | /* | |
1544 | * called under s_mutex | |
1545 | */ | |
3d7ded4d SW |
1546 | void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1547 | struct ceph_mds_session *session) | |
2f2dc053 | 1548 | { |
745a8e3b YZ |
1549 | struct ceph_msg *msg = NULL; |
1550 | struct ceph_mds_cap_release *head; | |
1551 | struct ceph_mds_cap_item *item; | |
1552 | struct ceph_cap *cap; | |
1553 | LIST_HEAD(tmp_list); | |
1554 | int num_cap_releases; | |
2f2dc053 | 1555 | |
0f8605f2 | 1556 | spin_lock(&session->s_cap_lock); |
745a8e3b YZ |
1557 | again: |
1558 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1559 | num_cap_releases = session->s_num_cap_releases; | |
1560 | session->s_num_cap_releases = 0; | |
2f2dc053 | 1561 | spin_unlock(&session->s_cap_lock); |
e01a5946 | 1562 | |
745a8e3b YZ |
1563 | while (!list_empty(&tmp_list)) { |
1564 | if (!msg) { | |
1565 | msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, | |
1566 | PAGE_CACHE_SIZE, GFP_NOFS, false); | |
1567 | if (!msg) | |
1568 | goto out_err; | |
1569 | head = msg->front.iov_base; | |
1570 | head->num = cpu_to_le32(0); | |
1571 | msg->front.iov_len = sizeof(*head); | |
1572 | } | |
1573 | cap = list_first_entry(&tmp_list, struct ceph_cap, | |
1574 | session_caps); | |
1575 | list_del(&cap->session_caps); | |
1576 | num_cap_releases--; | |
e01a5946 | 1577 | |
00bd8edb | 1578 | head = msg->front.iov_base; |
745a8e3b YZ |
1579 | le32_add_cpu(&head->num, 1); |
1580 | item = msg->front.iov_base + msg->front.iov_len; | |
1581 | item->ino = cpu_to_le64(cap->cap_ino); | |
1582 | item->cap_id = cpu_to_le64(cap->cap_id); | |
1583 | item->migrate_seq = cpu_to_le32(cap->mseq); | |
1584 | item->seq = cpu_to_le32(cap->issue_seq); | |
1585 | msg->front.iov_len += sizeof(*item); | |
1586 | ||
1587 | ceph_put_cap(mdsc, cap); | |
1588 | ||
1589 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | |
1590 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1591 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1592 | ceph_con_send(&session->s_con, msg); | |
1593 | msg = NULL; | |
1594 | } | |
00bd8edb | 1595 | } |
e01a5946 | 1596 | |
745a8e3b | 1597 | BUG_ON(num_cap_releases != 0); |
e01a5946 | 1598 | |
745a8e3b YZ |
1599 | spin_lock(&session->s_cap_lock); |
1600 | if (!list_empty(&session->s_cap_releases)) | |
1601 | goto again; | |
1602 | spin_unlock(&session->s_cap_lock); | |
1603 | ||
1604 | if (msg) { | |
1605 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1606 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1607 | ceph_con_send(&session->s_con, msg); | |
e01a5946 | 1608 | } |
745a8e3b YZ |
1609 | return; |
1610 | out_err: | |
1611 | pr_err("send_cap_releases mds%d, failed to allocate message\n", | |
1612 | session->s_mds); | |
1613 | spin_lock(&session->s_cap_lock); | |
1614 | list_splice(&tmp_list, &session->s_cap_releases); | |
1615 | session->s_num_cap_releases += num_cap_releases; | |
1616 | spin_unlock(&session->s_cap_lock); | |
e01a5946 SW |
1617 | } |
1618 | ||
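/*
 * A minimal decoding sketch (editor's illustration, not part of the
 * original source) of the CAPRELEASE payload built above: one
 * ceph_mds_cap_release head whose 'num' field counts the
 * ceph_mds_cap_item records packed directly after it.
 */
static void example_walk_cap_releases(struct ceph_msg *msg)
{
	struct ceph_mds_cap_release *head = msg->front.iov_base;
	struct ceph_mds_cap_item *item = (void *)(head + 1);
	u32 i, num = le32_to_cpu(head->num);

	for (i = 0; i < num; i++, item++)
		pr_debug("released ino %llu cap id %llu\n",
			 le64_to_cpu(item->ino), le64_to_cpu(item->cap_id));
}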
2f2dc053 SW |
1619 | /* |
1620 | * requests | |
1621 | */ | |
1622 | ||
54008399 YZ |
1623 | int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, |
1624 | struct inode *dir) | |
1625 | { | |
1626 | struct ceph_inode_info *ci = ceph_inode(dir); | |
1627 | struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; | |
1628 | struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; | |
1629 | size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) + | |
1630 | sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease); | |
1631 | int order, num_entries; | |
1632 | ||
1633 | spin_lock(&ci->i_ceph_lock); | |
1634 | num_entries = ci->i_files + ci->i_subdirs; | |
1635 | spin_unlock(&ci->i_ceph_lock); | |
1636 | num_entries = max(num_entries, 1); | |
1637 | num_entries = min(num_entries, opt->max_readdir); | |
1638 | ||
1639 | order = get_order(size * num_entries); | |
1640 | while (order >= 0) { | |
1641 | rinfo->dir_in = (void*)__get_free_pages(GFP_NOFS | __GFP_NOWARN, | |
1642 | order); | |
1643 | if (rinfo->dir_in) | |
1644 | break; | |
1645 | order--; | |
1646 | } | |
1647 | if (!rinfo->dir_in) | |
1648 | return -ENOMEM; | |
1649 | ||
1650 | num_entries = (PAGE_SIZE << order) / size; | |
1651 | num_entries = min(num_entries, opt->max_readdir); | |
1652 | ||
1653 | rinfo->dir_buf_size = PAGE_SIZE << order; | |
1654 | req->r_num_caps = num_entries + 1; | |
1655 | req->r_args.readdir.max_entries = cpu_to_le32(num_entries); | |
1656 | req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); | |
1657 | return 0; | |
1658 | } | |
1659 | ||
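/*
 * Worked sizing example (editor's illustration with assumed numbers):
 * if the per-entry reply metadata 'size' above comes to ~60 bytes and
 * the directory holds 1000 entries, size * num_entries is ~60KB, so
 * get_order() requests an order-4 allocation (64KB with 4KB pages).
 * If that allocation fails, the loop falls back to smaller orders and
 * max_entries is recomputed to fit whatever buffer was obtained.
 */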
2f2dc053 SW |
1660 | /* |
1661 | * Create an mds request. | |
1662 | */ | |
1663 | struct ceph_mds_request * | |
1664 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |
1665 | { | |
1666 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | |
1667 | ||
1668 | if (!req) | |
1669 | return ERR_PTR(-ENOMEM); | |
1670 | ||
b4556396 | 1671 | mutex_init(&req->r_fill_mutex); |
37151668 | 1672 | req->r_mdsc = mdsc; |
2f2dc053 SW |
1673 | req->r_started = jiffies; |
1674 | req->r_resend_mds = -1; | |
1675 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | |
1676 | req->r_fmode = -1; | |
153c8e6b | 1677 | kref_init(&req->r_kref); |
2f2dc053 SW |
1678 | INIT_LIST_HEAD(&req->r_wait); |
1679 | init_completion(&req->r_completion); | |
1680 | init_completion(&req->r_safe_completion); | |
1681 | INIT_LIST_HEAD(&req->r_unsafe_item); | |
1682 | ||
b8e69066 SW |
1683 | req->r_stamp = CURRENT_TIME; |
1684 | ||
2f2dc053 SW |
1685 | req->r_op = op; |
1686 | req->r_direct_mode = mode; | |
1687 | return req; | |
1688 | } | |
1689 | ||
1690 | /* | |
44ca18f2 | 1691 | * return the oldest (lowest-tid) request in the request tree, or NULL/0 if none. |
2f2dc053 SW |
1692 | * |
1693 | * called under mdsc->mutex. | |
1694 | */ | |
44ca18f2 SW |
1695 | static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) |
1696 | { | |
1697 | if (RB_EMPTY_ROOT(&mdsc->request_tree)) | |
1698 | return NULL; | |
1699 | return rb_entry(rb_first(&mdsc->request_tree), | |
1700 | struct ceph_mds_request, r_node); | |
1701 | } | |
1702 | ||
e8a7b8b1 | 1703 | static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) |
2f2dc053 | 1704 | { |
e8a7b8b1 | 1705 | return mdsc->oldest_tid; |
2f2dc053 SW |
1706 | } |
1707 | ||
1708 | /* | |
1709 | * Build a dentry's path. Allocate on heap; caller must kfree. Based | |
1710 | * on build_path_from_dentry in fs/cifs/dir.c. | |
1711 | * | |
1712 | * If @stop_on_nosnap, generate path relative to the first non-snapped | |
1713 | * inode. | |
1714 | * | |
1715 | * Encode hidden .snap dirs as a double /, i.e. | |
1716 | * foo/.snap/bar -> foo//bar | |
1717 | */ | |
1718 | char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | |
1719 | int stop_on_nosnap) | |
1720 | { | |
1721 | struct dentry *temp; | |
1722 | char *path; | |
1723 | int len, pos; | |
1b71fe2e | 1724 | unsigned seq; |
2f2dc053 SW |
1725 | |
1726 | if (dentry == NULL) | |
1727 | return ERR_PTR(-EINVAL); | |
1728 | ||
1729 | retry: | |
1730 | len = 0; | |
1b71fe2e AV |
1731 | seq = read_seqbegin(&rename_lock); |
1732 | rcu_read_lock(); | |
2f2dc053 | 1733 | for (temp = dentry; !IS_ROOT(temp);) { |
2b0143b5 | 1734 | struct inode *inode = d_inode(temp); |
2f2dc053 SW |
1735 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) |
1736 | len++; /* slash only */ | |
1737 | else if (stop_on_nosnap && inode && | |
1738 | ceph_snap(inode) == CEPH_NOSNAP) | |
1739 | break; | |
1740 | else | |
1741 | len += 1 + temp->d_name.len; | |
1742 | temp = temp->d_parent; | |
2f2dc053 | 1743 | } |
1b71fe2e | 1744 | rcu_read_unlock(); |
2f2dc053 SW |
1745 | if (len) |
1746 | len--; /* no leading '/' */ | |
1747 | ||
1748 | path = kmalloc(len+1, GFP_NOFS); | |
1749 | if (path == NULL) | |
1750 | return ERR_PTR(-ENOMEM); | |
1751 | pos = len; | |
1752 | path[pos] = 0; /* trailing null */ | |
1b71fe2e | 1753 | rcu_read_lock(); |
2f2dc053 | 1754 | for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { |
1b71fe2e | 1755 | struct inode *inode; |
2f2dc053 | 1756 | |
1b71fe2e | 1757 | spin_lock(&temp->d_lock); |
2b0143b5 | 1758 | inode = d_inode(temp); |
2f2dc053 | 1759 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { |
104648ad | 1760 | dout("build_path path+%d: %p SNAPDIR\n", |
2f2dc053 SW |
1761 | pos, temp); |
1762 | } else if (stop_on_nosnap && inode && | |
1763 | ceph_snap(inode) == CEPH_NOSNAP) { | |
9d5a09e6 | 1764 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1765 | break; |
1766 | } else { | |
1767 | pos -= temp->d_name.len; | |
1b71fe2e AV |
1768 | if (pos < 0) { |
1769 | spin_unlock(&temp->d_lock); | |
2f2dc053 | 1770 | break; |
1b71fe2e | 1771 | } |
2f2dc053 SW |
1772 | strncpy(path + pos, temp->d_name.name, |
1773 | temp->d_name.len); | |
2f2dc053 | 1774 | } |
1b71fe2e | 1775 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1776 | if (pos) |
1777 | path[--pos] = '/'; | |
1778 | temp = temp->d_parent; | |
2f2dc053 | 1779 | } |
1b71fe2e AV |
1780 | rcu_read_unlock(); |
1781 | if (pos != 0 || read_seqretry(&rename_lock, seq)) { | |
104648ad | 1782 | pr_err("build_path did not end path lookup where " |
2f2dc053 SW |
1783 | "expected, namelen is %d, pos is %d\n", len, pos); |
1784 | /* presumably this is only possible if racing with a | |
1785 | rename of one of the parent directories (we cannot | |
1786 | lock the dentries above us to prevent this, but | |
1787 | retrying should be harmless) */ | |
1788 | kfree(path); | |
1789 | goto retry; | |
1790 | } | |
1791 | ||
2b0143b5 | 1792 | *base = ceph_ino(d_inode(temp)); |
2f2dc053 | 1793 | *plen = len; |
104648ad | 1794 | dout("build_path on %p %d built %llx '%.*s'\n", |
84d08fa8 | 1795 | dentry, d_count(dentry), *base, len, path); |
2f2dc053 SW |
1796 | return path; |
1797 | } | |
1798 | ||
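/*
 * Usage sketch (editor's illustration, not part of the original
 * source): the returned buffer is heap-allocated and owned by the
 * caller, mirroring how build_dentry_path() below consumes it.
 */
static int example_use_build_path(struct dentry *dentry)
{
	u64 base;
	int len;
	char *path = ceph_mdsc_build_path(dentry, &len, &base, 1);

	if (IS_ERR(path))
		return PTR_ERR(path);
	pr_debug("path of %p is %llx/%.*s\n", dentry, base, len, path);
	kfree(path);
	return 0;
}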
1799 | static int build_dentry_path(struct dentry *dentry, | |
1800 | const char **ppath, int *ppathlen, u64 *pino, | |
1801 | int *pfreepath) | |
1802 | { | |
1803 | char *path; | |
1804 | ||
2b0143b5 DH |
1805 | if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) { |
1806 | *pino = ceph_ino(d_inode(dentry->d_parent)); | |
2f2dc053 SW |
1807 | *ppath = dentry->d_name.name; |
1808 | *ppathlen = dentry->d_name.len; | |
1809 | return 0; | |
1810 | } | |
1811 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1812 | if (IS_ERR(path)) | |
1813 | return PTR_ERR(path); | |
1814 | *ppath = path; | |
1815 | *pfreepath = 1; | |
1816 | return 0; | |
1817 | } | |
1818 | ||
1819 | static int build_inode_path(struct inode *inode, | |
1820 | const char **ppath, int *ppathlen, u64 *pino, | |
1821 | int *pfreepath) | |
1822 | { | |
1823 | struct dentry *dentry; | |
1824 | char *path; | |
1825 | ||
1826 | if (ceph_snap(inode) == CEPH_NOSNAP) { | |
1827 | *pino = ceph_ino(inode); | |
1828 | *ppathlen = 0; | |
1829 | return 0; | |
1830 | } | |
1831 | dentry = d_find_alias(inode); | |
1832 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1833 | dput(dentry); | |
1834 | if (IS_ERR(path)) | |
1835 | return PTR_ERR(path); | |
1836 | *ppath = path; | |
1837 | *pfreepath = 1; | |
1838 | return 0; | |
1839 | } | |
1840 | ||
1841 | /* | |
1842 | * request arguments may be specified via an inode *, a dentry *, or | |
1843 | * an explicit ino+path. | |
1844 | */ | |
1845 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |
1846 | const char *rpath, u64 rino, | |
1847 | const char **ppath, int *pathlen, | |
1848 | u64 *ino, int *freepath) | |
1849 | { | |
1850 | int r = 0; | |
1851 | ||
1852 | if (rinode) { | |
1853 | r = build_inode_path(rinode, ppath, pathlen, ino, freepath); | |
1854 | dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), | |
1855 | ceph_snap(rinode)); | |
1856 | } else if (rdentry) { | |
1857 | r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); | |
1858 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, | |
1859 | *ppath); | |
795858db | 1860 | } else if (rpath || rino) { |
2f2dc053 SW |
1861 | *ino = rino; |
1862 | *ppath = rpath; | |
b000056a | 1863 | *pathlen = rpath ? strlen(rpath) : 0; |
2f2dc053 SW |
1864 | dout(" path %.*s\n", *pathlen, rpath); |
1865 | } | |
1866 | ||
1867 | return r; | |
1868 | } | |
1869 | ||
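/*
 * Editor's illustration (hypothetical values) of the three ways a
 * request can name its target through set_request_path_attr() above:
 *
 *	set_request_path_attr(inode, NULL, NULL, 0, ...)    by inode
 *	set_request_path_attr(NULL, dentry, NULL, 0, ...)   by dentry
 *	set_request_path_attr(NULL, NULL, "a/b", ino, ...)  by ino+path
 *
 * create_request_message() below applies r_inode, r_dentry and
 * r_path1/r_ino1 in exactly this order of preference.
 */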
1870 | /* | |
1871 | * called under mdsc->mutex | |
1872 | */ | |
1873 | static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |
1874 | struct ceph_mds_request *req, | |
6e6f0923 | 1875 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
1876 | { |
1877 | struct ceph_msg *msg; | |
1878 | struct ceph_mds_request_head *head; | |
1879 | const char *path1 = NULL; | |
1880 | const char *path2 = NULL; | |
1881 | u64 ino1 = 0, ino2 = 0; | |
1882 | int pathlen1 = 0, pathlen2 = 0; | |
1883 | int freepath1 = 0, freepath2 = 0; | |
1884 | int len; | |
1885 | u16 releases; | |
1886 | void *p, *end; | |
1887 | int ret; | |
1888 | ||
1889 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | |
1890 | req->r_path1, req->r_ino1.ino, | |
1891 | &path1, &pathlen1, &ino1, &freepath1); | |
1892 | if (ret < 0) { | |
1893 | msg = ERR_PTR(ret); | |
1894 | goto out; | |
1895 | } | |
1896 | ||
1897 | ret = set_request_path_attr(NULL, req->r_old_dentry, | |
1898 | req->r_path2, req->r_ino2.ino, | |
1899 | &path2, &pathlen2, &ino2, &freepath2); | |
1900 | if (ret < 0) { | |
1901 | msg = ERR_PTR(ret); | |
1902 | goto out_free1; | |
1903 | } | |
1904 | ||
1905 | len = sizeof(*head) + | |
b8e69066 SW |
1906 | pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + |
1907 | sizeof(struct timespec); | |
2f2dc053 SW |
1908 | |
1909 | /* calculate (max) length for cap releases */ | |
1910 | len += sizeof(struct ceph_mds_request_release) * | |
1911 | (!!req->r_inode_drop + !!req->r_dentry_drop + | |
1912 | !!req->r_old_inode_drop + !!req->r_old_dentry_drop); | |
1913 | if (req->r_dentry_drop) | |
1914 | len += req->r_dentry->d_name.len; | |
1915 | if (req->r_old_dentry_drop) | |
1916 | len += req->r_old_dentry->d_name.len; | |
1917 | ||
b61c2763 | 1918 | msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f2 SW |
1919 | if (!msg) { |
1920 | msg = ERR_PTR(-ENOMEM); | |
2f2dc053 | 1921 | goto out_free2; |
a79832f2 | 1922 | } |
2f2dc053 | 1923 | |
7cfa0313 | 1924 | msg->hdr.version = cpu_to_le16(2); |
6df058c0 SW |
1925 | msg->hdr.tid = cpu_to_le64(req->r_tid); |
1926 | ||
2f2dc053 SW |
1927 | head = msg->front.iov_base; |
1928 | p = msg->front.iov_base + sizeof(*head); | |
1929 | end = msg->front.iov_base + msg->front.iov_len; | |
1930 | ||
1931 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | |
1932 | head->op = cpu_to_le32(req->r_op); | |
ff3d0046 EB |
1933 | head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); |
1934 | head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); | |
2f2dc053 SW |
1935 | head->args = req->r_args; |
1936 | ||
1937 | ceph_encode_filepath(&p, end, ino1, path1); | |
1938 | ceph_encode_filepath(&p, end, ino2, path2); | |
1939 | ||
e979cf50 SW |
1940 | /* make note of release offset, in case we need to replay */ |
1941 | req->r_request_release_offset = p - msg->front.iov_base; | |
1942 | ||
2f2dc053 SW |
1943 | /* cap releases */ |
1944 | releases = 0; | |
1945 | if (req->r_inode_drop) | |
1946 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 1947 | req->r_inode ? req->r_inode : d_inode(req->r_dentry), |
2f2dc053 SW |
1948 | mds, req->r_inode_drop, req->r_inode_unless, 0); |
1949 | if (req->r_dentry_drop) | |
1950 | releases += ceph_encode_dentry_release(&p, req->r_dentry, | |
1951 | mds, req->r_dentry_drop, req->r_dentry_unless); | |
1952 | if (req->r_old_dentry_drop) | |
1953 | releases += ceph_encode_dentry_release(&p, req->r_old_dentry, | |
1954 | mds, req->r_old_dentry_drop, req->r_old_dentry_unless); | |
1955 | if (req->r_old_inode_drop) | |
1956 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 1957 | d_inode(req->r_old_dentry), |
2f2dc053 | 1958 | mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); |
6e6f0923 YZ |
1959 | |
1960 | if (drop_cap_releases) { | |
1961 | releases = 0; | |
1962 | p = msg->front.iov_base + req->r_request_release_offset; | |
1963 | } | |
1964 | ||
2f2dc053 SW |
1965 | head->num_releases = cpu_to_le16(releases); |
1966 | ||
b8e69066 | 1967 | /* time stamp */ |
1f041a89 YZ |
1968 | { |
1969 | struct ceph_timespec ts; | |
1970 | ceph_encode_timespec(&ts, &req->r_stamp); | |
1971 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
1972 | } | |
b8e69066 | 1973 | |
2f2dc053 SW |
1974 | BUG_ON(p > end); |
1975 | msg->front.iov_len = p - msg->front.iov_base; | |
1976 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1977 | ||
25e6bae3 YZ |
1978 | if (req->r_pagelist) { |
1979 | struct ceph_pagelist *pagelist = req->r_pagelist; | |
1980 | atomic_inc(&pagelist->refcnt); | |
1981 | ceph_msg_data_add_pagelist(msg, pagelist); | |
1982 | msg->hdr.data_len = cpu_to_le32(pagelist->length); | |
1983 | } else { | |
1984 | msg->hdr.data_len = 0; | |
ebf18f47 | 1985 | } |
02afca6c | 1986 | |
2f2dc053 SW |
1987 | msg->hdr.data_off = cpu_to_le16(0); |
1988 | ||
1989 | out_free2: | |
1990 | if (freepath2) | |
1991 | kfree((char *)path2); | |
1992 | out_free1: | |
1993 | if (freepath1) | |
1994 | kfree((char *)path1); | |
1995 | out: | |
1996 | return msg; | |
1997 | } | |
1998 | ||
1999 | /* | |
2000 | * called under mdsc->mutex if error, under no mutex if | |
2001 | * success. | |
2002 | */ | |
2003 | static void complete_request(struct ceph_mds_client *mdsc, | |
2004 | struct ceph_mds_request *req) | |
2005 | { | |
2006 | if (req->r_callback) | |
2007 | req->r_callback(mdsc, req); | |
2008 | else | |
03066f23 | 2009 | complete_all(&req->r_completion); |
2f2dc053 SW |
2010 | } |
2011 | ||
2012 | /* | |
2013 | * called under mdsc->mutex | |
2014 | */ | |
2015 | static int __prepare_send_request(struct ceph_mds_client *mdsc, | |
2016 | struct ceph_mds_request *req, | |
6e6f0923 | 2017 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2018 | { |
2019 | struct ceph_mds_request_head *rhead; | |
2020 | struct ceph_msg *msg; | |
2021 | int flags = 0; | |
2022 | ||
2f2dc053 | 2023 | req->r_attempts++; |
e55b71f8 GF |
2024 | if (req->r_inode) { |
2025 | struct ceph_cap *cap = | |
2026 | ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); | |
2027 | ||
2028 | if (cap) | |
2029 | req->r_sent_on_mseq = cap->mseq; | |
2030 | else | |
2031 | req->r_sent_on_mseq = -1; | |
2032 | } | |
2f2dc053 SW |
2033 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
2034 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | |
2035 | ||
01a92f17 | 2036 | if (req->r_got_unsafe) { |
c5c9a0bf | 2037 | void *p; |
01a92f17 SW |
2038 | /* |
2039 | * Replay. Do not regenerate message (and rebuild | |
2040 | * paths, etc.); just use the original message. | |
2041 | * Rebuilding paths will break for renames because | |
2042 | * d_move mangles the src name. | |
2043 | */ | |
2044 | msg = req->r_request; | |
2045 | rhead = msg->front.iov_base; | |
2046 | ||
2047 | flags = le32_to_cpu(rhead->flags); | |
2048 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2049 | rhead->flags = cpu_to_le32(flags); | |
2050 | ||
2051 | if (req->r_target_inode) | |
2052 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | |
2053 | ||
2054 | rhead->num_retry = req->r_attempts - 1; | |
e979cf50 SW |
2055 | |
2056 | /* remove cap/dentry releases from message */ | |
2057 | rhead->num_releases = 0; | |
c5c9a0bf YZ |
2058 | |
2059 | /* time stamp */ | |
2060 | p = msg->front.iov_base + req->r_request_release_offset; | |
1f041a89 YZ |
2061 | { |
2062 | struct ceph_timespec ts; | |
2063 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2064 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2065 | } | |
c5c9a0bf YZ |
2066 | |
2067 | msg->front.iov_len = p - msg->front.iov_base; | |
2068 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
01a92f17 SW |
2069 | return 0; |
2070 | } | |
2071 | ||
2f2dc053 SW |
2072 | if (req->r_request) { |
2073 | ceph_msg_put(req->r_request); | |
2074 | req->r_request = NULL; | |
2075 | } | |
6e6f0923 | 2076 | msg = create_request_message(mdsc, req, mds, drop_cap_releases); |
2f2dc053 | 2077 | if (IS_ERR(msg)) { |
e1518c7c | 2078 | req->r_err = PTR_ERR(msg); |
2f2dc053 | 2079 | complete_request(mdsc, req); |
a79832f2 | 2080 | return PTR_ERR(msg); |
2f2dc053 SW |
2081 | } |
2082 | req->r_request = msg; | |
2083 | ||
2084 | rhead = msg->front.iov_base; | |
2f2dc053 SW |
2085 | rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
2086 | if (req->r_got_unsafe) | |
2087 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2088 | if (req->r_locked_dir) | |
2089 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; | |
2090 | rhead->flags = cpu_to_le32(flags); | |
2091 | rhead->num_fwd = req->r_num_fwd; | |
2092 | rhead->num_retry = req->r_attempts - 1; | |
01a92f17 | 2093 | rhead->ino = 0; |
2f2dc053 SW |
2094 | |
2095 | dout(" r_locked_dir = %p\n", req->r_locked_dir); | |
2f2dc053 SW |
2096 | return 0; |
2097 | } | |
2098 | ||
2099 | /* | |
2100 | * send request, or put it on the appropriate wait list. | |
2101 | */ | |
2102 | static int __do_request(struct ceph_mds_client *mdsc, | |
2103 | struct ceph_mds_request *req) | |
2104 | { | |
2105 | struct ceph_mds_session *session = NULL; | |
2106 | int mds = -1; | |
2107 | int err = -EAGAIN; | |
2108 | ||
eb1b8af3 YZ |
2109 | if (req->r_err || req->r_got_result) { |
2110 | if (req->r_aborted) | |
2111 | __unregister_request(mdsc, req); | |
2f2dc053 | 2112 | goto out; |
eb1b8af3 | 2113 | } |
2f2dc053 SW |
2114 | |
2115 | if (req->r_timeout && | |
2116 | time_after_eq(jiffies, req->r_started + req->r_timeout)) { | |
2117 | dout("do_request timed out\n"); | |
2118 | err = -EIO; | |
2119 | goto finish; | |
2120 | } | |
2121 | ||
dc69e2e9 SW |
2122 | put_request_session(req); |
2123 | ||
2f2dc053 SW |
2124 | mds = __choose_mds(mdsc, req); |
2125 | if (mds < 0 || | |
2126 | ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { | |
2127 | dout("do_request no mds or not active, waiting for map\n"); | |
2128 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2129 | goto out; | |
2130 | } | |
2131 | ||
2132 | /* get, open session */ | |
2133 | session = __ceph_lookup_mds_session(mdsc, mds); | |
9c423956 | 2134 | if (!session) { |
2f2dc053 | 2135 | session = register_session(mdsc, mds); |
9c423956 SW |
2136 | if (IS_ERR(session)) { |
2137 | err = PTR_ERR(session); | |
2138 | goto finish; | |
2139 | } | |
2140 | } | |
dc69e2e9 SW |
2141 | req->r_session = get_session(session); |
2142 | ||
2f2dc053 | 2143 | dout("do_request mds%d session %p state %s\n", mds, session, |
a687ecaf | 2144 | ceph_session_state_name(session->s_state)); |
2f2dc053 SW |
2145 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
2146 | session->s_state != CEPH_MDS_SESSION_HUNG) { | |
2147 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
2148 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
2149 | __open_session(mdsc, session); | |
2150 | list_add(&req->r_wait, &session->s_waiting); | |
2151 | goto out_session; | |
2152 | } | |
2153 | ||
2154 | /* send request */ | |
2f2dc053 SW |
2155 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
2156 | ||
2157 | if (req->r_request_started == 0) /* note request start time */ | |
2158 | req->r_request_started = jiffies; | |
2159 | ||
6e6f0923 | 2160 | err = __prepare_send_request(mdsc, req, mds, false); |
2f2dc053 SW |
2161 | if (!err) { |
2162 | ceph_msg_get(req->r_request); | |
2163 | ceph_con_send(&session->s_con, req->r_request); | |
2164 | } | |
2165 | ||
2166 | out_session: | |
2167 | ceph_put_mds_session(session); | |
2168 | out: | |
2169 | return err; | |
2170 | ||
2171 | finish: | |
e1518c7c | 2172 | req->r_err = err; |
2f2dc053 SW |
2173 | complete_request(mdsc, req); |
2174 | goto out; | |
2175 | } | |
2176 | ||
2177 | /* | |
2178 | * called under mdsc->mutex | |
2179 | */ | |
2180 | static void __wake_requests(struct ceph_mds_client *mdsc, | |
2181 | struct list_head *head) | |
2182 | { | |
ed75ec2c YZ |
2183 | struct ceph_mds_request *req; |
2184 | LIST_HEAD(tmp_list); | |
2185 | ||
2186 | list_splice_init(head, &tmp_list); | |
2f2dc053 | 2187 | |
ed75ec2c YZ |
2188 | while (!list_empty(&tmp_list)) { |
2189 | req = list_entry(tmp_list.next, | |
2190 | struct ceph_mds_request, r_wait); | |
2f2dc053 | 2191 | list_del_init(&req->r_wait); |
7971bd92 | 2192 | dout(" wake request %p tid %llu\n", req, req->r_tid); |
2f2dc053 SW |
2193 | __do_request(mdsc, req); |
2194 | } | |
2195 | } | |
2196 | ||
2197 | /* | |
2198 | * Wake up threads with requests pending for @mds, so that they can | |
29790f26 | 2199 | * resubmit their requests to a possibly different mds. |
2f2dc053 | 2200 | */ |
29790f26 | 2201 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc053 | 2202 | { |
44ca18f2 | 2203 | struct ceph_mds_request *req; |
282c1052 | 2204 | struct rb_node *p = rb_first(&mdsc->request_tree); |
2f2dc053 SW |
2205 | |
2206 | dout("kick_requests mds%d\n", mds); | |
282c1052 | 2207 | while (p) { |
44ca18f2 | 2208 | req = rb_entry(p, struct ceph_mds_request, r_node); |
282c1052 | 2209 | p = rb_next(p); |
44ca18f2 SW |
2210 | if (req->r_got_unsafe) |
2211 | continue; | |
3de22be6 YZ |
2212 | if (req->r_attempts > 0) |
2213 | continue; /* only new requests */ | |
44ca18f2 SW |
2214 | if (req->r_session && |
2215 | req->r_session->s_mds == mds) { | |
2216 | dout(" kicking tid %llu\n", req->r_tid); | |
03974e81 | 2217 | list_del_init(&req->r_wait); |
44ca18f2 | 2218 | __do_request(mdsc, req); |
2f2dc053 SW |
2219 | } |
2220 | } | |
2221 | } | |
2222 | ||
2223 | void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, | |
2224 | struct ceph_mds_request *req) | |
2225 | { | |
2226 | dout("submit_request on %p\n", req); | |
2227 | mutex_lock(&mdsc->mutex); | |
2228 | __register_request(mdsc, req, NULL); | |
2229 | __do_request(mdsc, req); | |
2230 | mutex_unlock(&mdsc->mutex); | |
2231 | } | |
2232 | ||
2233 | /* | |
2234 | * Synchronously perform an mds request. Take care of all of the | |
2235 | * session setup, forwarding, retry details. | |
2236 | */ | |
2237 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, | |
2238 | struct inode *dir, | |
2239 | struct ceph_mds_request *req) | |
2240 | { | |
2241 | int err; | |
2242 | ||
2243 | dout("do_request on %p\n", req); | |
2244 | ||
2245 | /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ | |
2246 | if (req->r_inode) | |
2247 | ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); | |
2248 | if (req->r_locked_dir) | |
2249 | ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); | |
844d87c3 | 2250 | if (req->r_old_dentry_dir) |
41b02e1f SW |
2251 | ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), |
2252 | CEPH_CAP_PIN); | |
2f2dc053 SW |
2253 | |
2254 | /* issue */ | |
2255 | mutex_lock(&mdsc->mutex); | |
2256 | __register_request(mdsc, req, dir); | |
2257 | __do_request(mdsc, req); | |
2258 | ||
e1518c7c SW |
2259 | if (req->r_err) { |
2260 | err = req->r_err; | |
2261 | __unregister_request(mdsc, req); | |
2262 | dout("do_request early error %d\n", err); | |
2263 | goto out; | |
2f2dc053 SW |
2264 | } |
2265 | ||
e1518c7c SW |
2266 | /* wait */ |
2267 | mutex_unlock(&mdsc->mutex); | |
2268 | dout("do_request waiting\n"); | |
2269 | if (req->r_timeout) { | |
aa91647c | 2270 | err = (long)wait_for_completion_killable_timeout( |
e1518c7c SW |
2271 | &req->r_completion, req->r_timeout); |
2272 | if (err == 0) | |
2273 | err = -EIO; | |
9280be24 YZ |
2274 | } else if (req->r_wait_for_completion) { |
2275 | err = req->r_wait_for_completion(mdsc, req); | |
e1518c7c | 2276 | } else { |
aa91647c | 2277 | err = wait_for_completion_killable(&req->r_completion); |
e1518c7c SW |
2278 | } |
2279 | dout("do_request waited, got %d\n", err); | |
2280 | mutex_lock(&mdsc->mutex); | |
5b1daecd | 2281 | |
e1518c7c SW |
2282 | /* only abort if we didn't race with a real reply */ |
2283 | if (req->r_got_result) { | |
2284 | err = le32_to_cpu(req->r_reply_info.head->result); | |
2285 | } else if (err < 0) { | |
2286 | dout("aborted request %lld with %d\n", req->r_tid, err); | |
b4556396 SW |
2287 | |
2288 | /* | |
2289 | * ensure we aren't running concurrently with | |
2290 | * ceph_fill_trace or ceph_readdir_prepopulate, which | |
2291 | * rely on locks (dir mutex) held by our caller. | |
2292 | */ | |
2293 | mutex_lock(&req->r_fill_mutex); | |
e1518c7c SW |
2294 | req->r_err = err; |
2295 | req->r_aborted = true; | |
b4556396 | 2296 | mutex_unlock(&req->r_fill_mutex); |
5b1daecd | 2297 | |
e1518c7c | 2298 | if (req->r_locked_dir && |
167c9e35 SW |
2299 | (req->r_op & CEPH_MDS_OP_WRITE)) |
2300 | ceph_invalidate_dir_request(req); | |
2f2dc053 | 2301 | } else { |
e1518c7c | 2302 | err = req->r_err; |
2f2dc053 | 2303 | } |
2f2dc053 | 2304 | |
e1518c7c SW |
2305 | out: |
2306 | mutex_unlock(&mdsc->mutex); | |
2f2dc053 SW |
2307 | dout("do_request %p done, result %d\n", req, err); |
2308 | return err; | |
2309 | } | |
2310 | ||
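/*
 * Typical caller pattern (editor's sketch modeled on the lookup path
 * in fs/ceph/dir.c; the op and field choices are illustrative):
 */
static int example_sync_lookup(struct ceph_mds_client *mdsc,
			       struct inode *dir, struct dentry *dentry)
{
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;	/* caller holds the dir i_mutex */
	err = ceph_mdsc_do_request(mdsc, dir, req);
	ceph_mdsc_put_request(req);
	return err;
}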
167c9e35 | 2311 | /* |
2f276c51 | 2312 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
167c9e35 SW |
2313 | * namespace request. |
2314 | */ | |
2315 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) | |
2316 | { | |
2317 | struct inode *inode = req->r_locked_dir; | |
167c9e35 | 2318 | |
2f276c51 | 2319 | dout("invalidate_dir_request %p (complete, lease(s))\n", inode); |
167c9e35 | 2320 | |
2f276c51 | 2321 | ceph_dir_clear_complete(inode); |
167c9e35 SW |
2322 | if (req->r_dentry) |
2323 | ceph_invalidate_dentry_lease(req->r_dentry); | |
2324 | if (req->r_old_dentry) | |
2325 | ceph_invalidate_dentry_lease(req->r_old_dentry); | |
2326 | } | |
2327 | ||
2f2dc053 SW |
2328 | /* |
2329 | * Handle mds reply. | |
2330 | * | |
2331 | * We take the session mutex and parse and process the reply immediately. | |
2332 | * This preserves the logical ordering of replies, capabilities, etc., sent | |
2333 | * by the MDS as they are applied to our local cache. | |
2334 | */ | |
2335 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |
2336 | { | |
2337 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2338 | struct ceph_mds_request *req; | |
2339 | struct ceph_mds_reply_head *head = msg->front.iov_base; | |
2340 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ | |
982d6011 | 2341 | struct ceph_snap_realm *realm; |
2f2dc053 SW |
2342 | u64 tid; |
2343 | int err, result; | |
2600d2dd | 2344 | int mds = session->s_mds; |
2f2dc053 | 2345 | |
2f2dc053 SW |
2346 | if (msg->front.iov_len < sizeof(*head)) { |
2347 | pr_err("mdsc_handle_reply got corrupt (short) reply\n"); | |
9ec7cab1 | 2348 | ceph_msg_dump(msg); |
2f2dc053 SW |
2349 | return; |
2350 | } | |
2351 | ||
2352 | /* get request, session */ | |
6df058c0 | 2353 | tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2354 | mutex_lock(&mdsc->mutex); |
2355 | req = __lookup_request(mdsc, tid); | |
2356 | if (!req) { | |
2357 | dout("handle_reply on unknown tid %llu\n", tid); | |
2358 | mutex_unlock(&mdsc->mutex); | |
2359 | return; | |
2360 | } | |
2361 | dout("handle_reply %p\n", req); | |
2f2dc053 SW |
2362 | |
2363 | /* correct session? */ | |
d96d6049 | 2364 | if (req->r_session != session) { |
2f2dc053 SW |
2365 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
2366 | " not mds%d\n", tid, session->s_mds, | |
2367 | req->r_session ? req->r_session->s_mds : -1); | |
2368 | mutex_unlock(&mdsc->mutex); | |
2369 | goto out; | |
2370 | } | |
2371 | ||
2372 | /* dup? */ | |
2373 | if ((req->r_got_unsafe && !head->safe) || | |
2374 | (req->r_got_safe && head->safe)) { | |
f3ae1b97 | 2375 | pr_warn("got a dup %s reply on %llu from mds%d\n", |
2f2dc053 SW |
2376 | head->safe ? "safe" : "unsafe", tid, mds); |
2377 | mutex_unlock(&mdsc->mutex); | |
2378 | goto out; | |
2379 | } | |
85792d0d | 2380 | if (req->r_got_safe && !head->safe) { |
f3ae1b97 | 2381 | pr_warn("got unsafe after safe on %llu from mds%d\n", |
85792d0d SW |
2382 | tid, mds); |
2383 | mutex_unlock(&mdsc->mutex); | |
2384 | goto out; | |
2385 | } | |
2f2dc053 SW |
2386 | |
2387 | result = le32_to_cpu(head->result); | |
2388 | ||
2389 | /* | |
e55b71f8 GF |
2390 | * Handle an ESTALE: | |
2391 | * if we're not talking to the authority, send to them; | |
2392 | * if the authority has changed while we weren't looking, | |
2393 | * send to the new authority; | |
2394 | * otherwise we just have to return an ESTALE. | |
2f2dc053 SW |
2395 | */ |
2396 | if (result == -ESTALE) { | |
e55b71f8 | 2397 | dout("got ESTALE on request %llu", req->r_tid); |
51da8e8c | 2398 | req->r_resend_mds = -1; |
ca18bede | 2399 | if (req->r_direct_mode != USE_AUTH_MDS) { |
e55b71f8 GF |
2400 | dout("not using auth, setting for that now"); |
2401 | req->r_direct_mode = USE_AUTH_MDS; | |
2f2dc053 SW |
2402 | __do_request(mdsc, req); |
2403 | mutex_unlock(&mdsc->mutex); | |
2404 | goto out; | |
e55b71f8 | 2405 | } else { |
ca18bede YZ |
2406 | int mds = __choose_mds(mdsc, req); |
2407 | if (mds >= 0 && mds != req->r_session->s_mds) { | |
2408 | dout("but auth changed, so resending"); | |
e55b71f8 GF |
2409 | __do_request(mdsc, req); |
2410 | mutex_unlock(&mdsc->mutex); | |
2411 | goto out; | |
2412 | } | |
2f2dc053 | 2413 | } |
e55b71f8 | 2414 | dout("have to return ESTALE on request %llu", req->r_tid); |
2f2dc053 SW |
2415 | } |
2416 | ||
e55b71f8 | 2417 | |
2f2dc053 SW |
2418 | if (head->safe) { |
2419 | req->r_got_safe = true; | |
2420 | __unregister_request(mdsc, req); | |
2f2dc053 SW |
2421 | |
2422 | if (req->r_got_unsafe) { | |
2423 | /* | |
2424 | * We already handled the unsafe response, now do the | |
2425 | * cleanup. No need to examine the response; the MDS | |
2426 | * doesn't include any result info in the safe | |
2427 | * response. And even if it did, there is nothing | |
2428 | * useful we could do with a revised return value. | |
2429 | */ | |
2430 | dout("got safe reply %llu, mds%d\n", tid, mds); | |
2431 | list_del_init(&req->r_unsafe_item); | |
2432 | ||
2433 | /* last unsafe request during umount? */ | |
44ca18f2 | 2434 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f23 | 2435 | complete_all(&mdsc->safe_umount_waiters); |
2f2dc053 SW |
2436 | mutex_unlock(&mdsc->mutex); |
2437 | goto out; | |
2438 | } | |
e1518c7c | 2439 | } else { |
2f2dc053 SW |
2440 | req->r_got_unsafe = true; |
2441 | list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); | |
2442 | } | |
2443 | ||
2444 | dout("handle_reply tid %lld result %d\n", tid, result); | |
2445 | rinfo = &req->r_reply_info; | |
14303d20 | 2446 | err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc053 SW |
2447 | mutex_unlock(&mdsc->mutex); |
2448 | ||
2449 | mutex_lock(&session->s_mutex); | |
2450 | if (err < 0) { | |
25933abd | 2451 | pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); |
9ec7cab1 | 2452 | ceph_msg_dump(msg); |
2f2dc053 SW |
2453 | goto out_err; |
2454 | } | |
2455 | ||
2456 | /* snap trace */ | |
982d6011 | 2457 | realm = NULL; |
2f2dc053 SW |
2458 | if (rinfo->snapblob_len) { |
2459 | down_write(&mdsc->snap_rwsem); | |
2460 | ceph_update_snap_trace(mdsc, rinfo->snapblob, | |
982d6011 YZ |
2461 | rinfo->snapblob + rinfo->snapblob_len, |
2462 | le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, | |
2463 | &realm); | |
2f2dc053 SW |
2464 | downgrade_write(&mdsc->snap_rwsem); |
2465 | } else { | |
2466 | down_read(&mdsc->snap_rwsem); | |
2467 | } | |
2468 | ||
2469 | /* insert trace into our cache */ | |
b4556396 | 2470 | mutex_lock(&req->r_fill_mutex); |
3d14c5d2 | 2471 | err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); |
2f2dc053 | 2472 | if (err == 0) { |
6e8575fa | 2473 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
81c6aea5 | 2474 | req->r_op == CEPH_MDS_OP_LSSNAP)) |
2f2dc053 | 2475 | ceph_readdir_prepopulate(req, req->r_session); |
37151668 | 2476 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc053 | 2477 | } |
b4556396 | 2478 | mutex_unlock(&req->r_fill_mutex); |
2f2dc053 SW |
2479 | |
2480 | up_read(&mdsc->snap_rwsem); | |
982d6011 YZ |
2481 | if (realm) |
2482 | ceph_put_snap_realm(mdsc, realm); | |
2f2dc053 | 2483 | out_err: |
e1518c7c SW |
2484 | mutex_lock(&mdsc->mutex); |
2485 | if (!req->r_aborted) { | |
2486 | if (err) { | |
2487 | req->r_err = err; | |
2488 | } else { | |
2489 | req->r_reply = msg; | |
2490 | ceph_msg_get(msg); | |
2491 | req->r_got_result = true; | |
2492 | } | |
2f2dc053 | 2493 | } else { |
e1518c7c | 2494 | dout("reply arrived after request %lld was aborted\n", tid); |
2f2dc053 | 2495 | } |
e1518c7c | 2496 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 2497 | |
2f2dc053 SW |
2498 | mutex_unlock(&session->s_mutex); |
2499 | ||
2500 | /* kick calling process */ | |
2501 | complete_request(mdsc, req); | |
2502 | out: | |
2503 | ceph_mdsc_put_request(req); | |
2504 | return; | |
2505 | } | |
2506 | ||
2507 | ||
2508 | ||
2509 | /* | |
2510 | * handle mds notification that our request has been forwarded. | |
2511 | */ | |
2600d2dd SW |
2512 | static void handle_forward(struct ceph_mds_client *mdsc, |
2513 | struct ceph_mds_session *session, | |
2514 | struct ceph_msg *msg) | |
2f2dc053 SW |
2515 | { |
2516 | struct ceph_mds_request *req; | |
a1ea787c | 2517 | u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2518 | u32 next_mds; |
2519 | u32 fwd_seq; | |
2f2dc053 SW |
2520 | int err = -EINVAL; |
2521 | void *p = msg->front.iov_base; | |
2522 | void *end = p + msg->front.iov_len; | |
2f2dc053 | 2523 | |
a1ea787c | 2524 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea SW |
2525 | next_mds = ceph_decode_32(&p); |
2526 | fwd_seq = ceph_decode_32(&p); | |
2f2dc053 SW |
2527 | |
2528 | mutex_lock(&mdsc->mutex); | |
2529 | req = __lookup_request(mdsc, tid); | |
2530 | if (!req) { | |
2a8e5e36 | 2531 | dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); |
2f2dc053 SW |
2532 | goto out; /* dup reply? */ |
2533 | } | |
2534 | ||
2a8e5e36 SW |
2535 | if (req->r_aborted) { |
2536 | dout("forward tid %llu aborted, unregistering\n", tid); | |
2537 | __unregister_request(mdsc, req); | |
2538 | } else if (fwd_seq <= req->r_num_fwd) { | |
2539 | dout("forward tid %llu to mds%d - old seq %d <= %d\n", | |
2f2dc053 SW |
2540 | tid, next_mds, req->r_num_fwd, fwd_seq); |
2541 | } else { | |
2542 | /* resend. forward race not possible; mds would drop */ | |
2a8e5e36 SW |
2543 | dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); |
2544 | BUG_ON(req->r_err); | |
2545 | BUG_ON(req->r_got_result); | |
3de22be6 | 2546 | req->r_attempts = 0; |
2f2dc053 SW |
2547 | req->r_num_fwd = fwd_seq; |
2548 | req->r_resend_mds = next_mds; | |
2549 | put_request_session(req); | |
2550 | __do_request(mdsc, req); | |
2551 | } | |
2552 | ceph_mdsc_put_request(req); | |
2553 | out: | |
2554 | mutex_unlock(&mdsc->mutex); | |
2555 | return; | |
2556 | ||
2557 | bad: | |
2558 | pr_err("mdsc_handle_forward decode error err=%d\n", err); | |
2559 | } | |
2560 | ||
2561 | /* | |
2562 | * handle a mds session control message | |
2563 | */ | |
2564 | static void handle_session(struct ceph_mds_session *session, | |
2565 | struct ceph_msg *msg) | |
2566 | { | |
2567 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2568 | u32 op; | |
2569 | u64 seq; | |
2600d2dd | 2570 | int mds = session->s_mds; |
2f2dc053 SW |
2571 | struct ceph_mds_session_head *h = msg->front.iov_base; |
2572 | int wake = 0; | |
2573 | ||
2f2dc053 SW |
2574 | /* decode */ |
2575 | if (msg->front.iov_len != sizeof(*h)) | |
2576 | goto bad; | |
2577 | op = le32_to_cpu(h->op); | |
2578 | seq = le64_to_cpu(h->seq); | |
2579 | ||
2580 | mutex_lock(&mdsc->mutex); | |
2600d2dd SW |
2581 | if (op == CEPH_SESSION_CLOSE) |
2582 | __unregister_session(mdsc, session); | |
2f2dc053 SW |
2583 | /* FIXME: this ttl calculation is generous */ |
2584 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; | |
2585 | mutex_unlock(&mdsc->mutex); | |
2586 | ||
2587 | mutex_lock(&session->s_mutex); | |
2588 | ||
2589 | dout("handle_session mds%d %s %p state %s seq %llu\n", | |
2590 | mds, ceph_session_op_name(op), session, | |
a687ecaf | 2591 | ceph_session_state_name(session->s_state), seq); |
2f2dc053 SW |
2592 | |
2593 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { | |
2594 | session->s_state = CEPH_MDS_SESSION_OPEN; | |
2595 | pr_info("mds%d came back\n", session->s_mds); | |
2596 | } | |
2597 | ||
2598 | switch (op) { | |
2599 | case CEPH_SESSION_OPEN: | |
29790f26 SW |
2600 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2601 | pr_info("mds%d reconnect success\n", session->s_mds); | |
2f2dc053 SW |
2602 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2603 | renewed_caps(mdsc, session, 0); | |
2604 | wake = 1; | |
2605 | if (mdsc->stopping) | |
2606 | __close_session(mdsc, session); | |
2607 | break; | |
2608 | ||
2609 | case CEPH_SESSION_RENEWCAPS: | |
2610 | if (session->s_renew_seq == seq) | |
2611 | renewed_caps(mdsc, session, 1); | |
2612 | break; | |
2613 | ||
2614 | case CEPH_SESSION_CLOSE: | |
29790f26 SW |
2615 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2616 | pr_info("mds%d reconnect denied\n", session->s_mds); | |
1c841a96 | 2617 | cleanup_session_requests(mdsc, session); |
2f2dc053 | 2618 | remove_session_caps(session); |
656e4382 | 2619 | wake = 2; /* for good measure */ |
f3c60c59 | 2620 | wake_up_all(&mdsc->session_close_wq); |
2f2dc053 SW |
2621 | break; |
2622 | ||
2623 | case CEPH_SESSION_STALE: | |
2624 | pr_info("mds%d caps went stale, renewing\n", | |
2625 | session->s_mds); | |
d8fb02ab | 2626 | spin_lock(&session->s_gen_ttl_lock); |
2f2dc053 | 2627 | session->s_cap_gen++; |
1ce208a6 | 2628 | session->s_cap_ttl = jiffies - 1; |
d8fb02ab | 2629 | spin_unlock(&session->s_gen_ttl_lock); |
2f2dc053 SW |
2630 | send_renew_caps(mdsc, session); |
2631 | break; | |
2632 | ||
2633 | case CEPH_SESSION_RECALL_STATE: | |
2634 | trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); | |
2635 | break; | |
2636 | ||
186e4f7a YZ |
2637 | case CEPH_SESSION_FLUSHMSG: |
2638 | send_flushmsg_ack(mdsc, session, seq); | |
2639 | break; | |
2640 | ||
03f4fcb0 YZ |
2641 | case CEPH_SESSION_FORCE_RO: |
2642 | dout("force_session_readonly %p\n", session); | |
2643 | spin_lock(&session->s_cap_lock); | |
2644 | session->s_readonly = true; | |
2645 | spin_unlock(&session->s_cap_lock); | |
2646 | wake_up_session_caps(session, 0); | |
2647 | break; | |
2648 | ||
2f2dc053 SW |
2649 | default: |
2650 | pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); | |
2651 | WARN_ON(1); | |
2652 | } | |
2653 | ||
2654 | mutex_unlock(&session->s_mutex); | |
2655 | if (wake) { | |
2656 | mutex_lock(&mdsc->mutex); | |
2657 | __wake_requests(mdsc, &session->s_waiting); | |
656e4382 YZ |
2658 | if (wake == 2) |
2659 | kick_requests(mdsc, mds); | |
2f2dc053 SW |
2660 | mutex_unlock(&mdsc->mutex); |
2661 | } | |
2662 | return; | |
2663 | ||
2664 | bad: | |
2665 | pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, | |
2666 | (int)msg->front.iov_len); | |
9ec7cab1 | 2667 | ceph_msg_dump(msg); |
2f2dc053 SW |
2668 | return; |
2669 | } | |
2670 | ||
2671 | ||
2672 | /* | |
2673 | * called under session->mutex. | |
2674 | */ | |
2675 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, | |
2676 | struct ceph_mds_session *session) | |
2677 | { | |
2678 | struct ceph_mds_request *req, *nreq; | |
3de22be6 | 2679 | struct rb_node *p; |
2f2dc053 SW |
2680 | int err; |
2681 | ||
2682 | dout("replay_unsafe_requests mds%d\n", session->s_mds); | |
2683 | ||
2684 | mutex_lock(&mdsc->mutex); | |
2685 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { | |
6e6f0923 | 2686 | err = __prepare_send_request(mdsc, req, session->s_mds, true); |
2f2dc053 SW |
2687 | if (!err) { |
2688 | ceph_msg_get(req->r_request); | |
2689 | ceph_con_send(&session->s_con, req->r_request); | |
2690 | } | |
2691 | } | |
3de22be6 YZ |
2692 | |
2693 | /* | |
2694 | * also re-send old requests when the MDS enters the reconnect stage, so | |
2695 | * that the MDS can process completed requests in the clientreplay stage. | |
2696 | */ | |
2697 | p = rb_first(&mdsc->request_tree); | |
2698 | while (p) { | |
2699 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
2700 | p = rb_next(p); | |
2701 | if (req->r_got_unsafe) | |
2702 | continue; | |
2703 | if (req->r_attempts == 0) | |
2704 | continue; /* only old requests */ | |
2705 | if (req->r_session && | |
2706 | req->r_session->s_mds == session->s_mds) { | |
6e6f0923 YZ |
2707 | err = __prepare_send_request(mdsc, req, |
2708 | session->s_mds, true); | |
3de22be6 YZ |
2709 | if (!err) { |
2710 | ceph_msg_get(req->r_request); | |
2711 | ceph_con_send(&session->s_con, req->r_request); | |
2712 | } | |
2713 | } | |
2714 | } | |
2f2dc053 SW |
2715 | mutex_unlock(&mdsc->mutex); |
2716 | } | |
2717 | ||
2718 | /* | |
2719 | * Encode information about a cap for a reconnect with the MDS. | |
2720 | */ | |
2f2dc053 SW |
2721 | static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, |
2722 | void *arg) | |
2723 | { | |
20cb34ae SW |
2724 | union { |
2725 | struct ceph_mds_cap_reconnect v2; | |
2726 | struct ceph_mds_cap_reconnect_v1 v1; | |
2727 | } rec; | |
2728 | size_t reclen; | |
2f2dc053 | 2729 | struct ceph_inode_info *ci; |
20cb34ae SW |
2730 | struct ceph_reconnect_state *recon_state = arg; |
2731 | struct ceph_pagelist *pagelist = recon_state->pagelist; | |
2f2dc053 SW |
2732 | char *path; |
2733 | int pathlen, err; | |
2734 | u64 pathbase; | |
2735 | struct dentry *dentry; | |
2736 | ||
2737 | ci = cap->ci; | |
2738 | ||
2739 | dout(" adding %p ino %llx.%llx cap %p %lld %s\n", | |
2740 | inode, ceph_vinop(inode), cap, cap->cap_id, | |
2741 | ceph_cap_string(cap->issued)); | |
93cea5be SW |
2742 | err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); |
2743 | if (err) | |
2744 | return err; | |
2f2dc053 SW |
2745 | |
2746 | dentry = d_find_alias(inode); | |
2747 | if (dentry) { | |
2748 | path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); | |
2749 | if (IS_ERR(path)) { | |
2750 | err = PTR_ERR(path); | |
e072f8aa | 2751 | goto out_dput; |
2f2dc053 SW |
2752 | } |
2753 | } else { | |
2754 | path = NULL; | |
2755 | pathlen = 0; | |
2756 | } | |
93cea5be SW |
2757 | err = ceph_pagelist_encode_string(pagelist, path, pathlen); |
2758 | if (err) | |
e072f8aa | 2759 | goto out_free; |
2f2dc053 | 2760 | |
be655596 | 2761 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
2762 | cap->seq = 0; /* reset cap seq */ |
2763 | cap->issue_seq = 0; /* and issue_seq */ | |
667ca05c | 2764 | cap->mseq = 0; /* and migrate_seq */ |
99a9c273 | 2765 | cap->cap_gen = cap->session->s_cap_gen; |
20cb34ae SW |
2766 | |
2767 | if (recon_state->flock) { | |
2768 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); | |
2769 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2770 | rec.v2.issued = cpu_to_le32(cap->issued); | |
2771 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2772 | rec.v2.pathbase = cpu_to_le64(pathbase); | |
2773 | rec.v2.flock_len = 0; | |
2774 | reclen = sizeof(rec.v2); | |
2775 | } else { | |
2776 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); | |
2777 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2778 | rec.v1.issued = cpu_to_le32(cap->issued); | |
2779 | rec.v1.size = cpu_to_le64(inode->i_size); | |
2780 | ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); | |
2781 | ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); | |
2782 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2783 | rec.v1.pathbase = cpu_to_le64(pathbase); | |
2784 | reclen = sizeof(rec.v1); | |
2785 | } | |
be655596 | 2786 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 | 2787 | |
40819f6f GF |
2788 | if (recon_state->flock) { |
2789 | int num_fcntl_locks, num_flock_locks; | |
39be95e9 JS |
2790 | struct ceph_filelock *flocks; |
2791 | ||
2792 | encode_again: | |
39be95e9 | 2793 | ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); |
39be95e9 JS |
2794 | flocks = kmalloc((num_fcntl_locks+num_flock_locks) * |
2795 | sizeof(struct ceph_filelock), GFP_NOFS); | |
2796 | if (!flocks) { | |
2797 | err = -ENOMEM; | |
2798 | goto out_free; | |
2799 | } | |
39be95e9 JS |
2800 | err = ceph_encode_locks_to_buffer(inode, flocks, |
2801 | num_fcntl_locks, | |
2802 | num_flock_locks); | |
39be95e9 JS |
2803 | if (err) { |
2804 | kfree(flocks); | |
2805 | if (err == -ENOSPC) | |
2806 | goto encode_again; | |
2807 | goto out_free; | |
2808 | } | |
2809 | /* | |
2810 | * number of encoded locks is stable, so copy to pagelist | |
2811 | */ | |
2812 | rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) + | |
2813 | (num_fcntl_locks+num_flock_locks) * | |
2814 | sizeof(struct ceph_filelock)); | |
2815 | err = ceph_pagelist_append(pagelist, &rec, reclen); | |
2816 | if (!err) | |
2817 | err = ceph_locks_to_pagelist(flocks, pagelist, | |
2818 | num_fcntl_locks, | |
2819 | num_flock_locks); | |
2820 | kfree(flocks); | |
3612abbd SW |
2821 | } else { |
2822 | err = ceph_pagelist_append(pagelist, &rec, reclen); | |
40819f6f | 2823 | } |
44c99757 YZ |
2824 | |
2825 | recon_state->nr_caps++; | |
e072f8aa | 2826 | out_free: |
2f2dc053 | 2827 | kfree(path); |
e072f8aa | 2828 | out_dput: |
2f2dc053 | 2829 | dput(dentry); |
93cea5be | 2830 | return err; |
2f2dc053 SW |
2831 | } |
2832 | ||
2833 | ||
2834 | /* | |
2835 | * If an MDS fails and recovers, clients need to reconnect in order to | |
2836 | * reestablish shared state. This includes all caps issued through | |
2837 | * this session _and_ the snap_realm hierarchy. Because it's not | |
2838 | * clear which snap realms the mds cares about, we send everything we | |
2839 | * know about; that ensures we'll then get any new info the | |
2840 | * recovering MDS might have. | |
2841 | * | |
2842 | * This is a relatively heavyweight operation, but it's rare. | |
2843 | * | |
2844 | * called with mdsc->mutex held. | |
2845 | */ | |
34b6c855 SW |
2846 | static void send_mds_reconnect(struct ceph_mds_client *mdsc, |
2847 | struct ceph_mds_session *session) | |
2f2dc053 | 2848 | { |
2f2dc053 | 2849 | struct ceph_msg *reply; |
a105f00c | 2850 | struct rb_node *p; |
34b6c855 | 2851 | int mds = session->s_mds; |
9abf82b8 | 2852 | int err = -ENOMEM; |
44c99757 | 2853 | int s_nr_caps; |
93cea5be | 2854 | struct ceph_pagelist *pagelist; |
20cb34ae | 2855 | struct ceph_reconnect_state recon_state; |
2f2dc053 | 2856 | |
34b6c855 | 2857 | pr_info("mds%d reconnect start\n", mds); |
2f2dc053 | 2858 | |
93cea5be SW |
2859 | pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); |
2860 | if (!pagelist) | |
2861 | goto fail_nopagelist; | |
2862 | ceph_pagelist_init(pagelist); | |
2863 | ||
b61c2763 | 2864 | reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); |
a79832f2 | 2865 | if (!reply) |
93cea5be | 2866 | goto fail_nomsg; |
93cea5be | 2867 | |
34b6c855 SW |
2868 | mutex_lock(&session->s_mutex); |
2869 | session->s_state = CEPH_MDS_SESSION_RECONNECTING; | |
2870 | session->s_seq = 0; | |
2f2dc053 | 2871 | |
2f2dc053 | 2872 | dout("session %p state %s\n", session, |
a687ecaf | 2873 | ceph_session_state_name(session->s_state)); |
2f2dc053 | 2874 | |
99a9c273 YZ |
2875 | spin_lock(&session->s_gen_ttl_lock); |
2876 | session->s_cap_gen++; | |
2877 | spin_unlock(&session->s_gen_ttl_lock); | |
2878 | ||
2879 | spin_lock(&session->s_cap_lock); | |
03f4fcb0 YZ |
2880 | /* don't know if session is readonly */ |
2881 | session->s_readonly = 0; | |
99a9c273 YZ |
2882 | /* |
2883 | * notify __ceph_remove_cap() that we are composing cap reconnect. | |
2884 | * If a cap get released before being added to the cap reconnect, | |
2885 | * __ceph_remove_cap() should skip queuing cap release. | |
2886 | */ | |
2887 | session->s_cap_reconnect = 1; | |
e01a5946 | 2888 | /* drop old cap expires; we're about to reestablish that state */ |
745a8e3b | 2889 | cleanup_cap_releases(mdsc, session); |
e01a5946 | 2890 | |
5d23371f | 2891 | /* trim unused caps to reduce MDS's cache rejoin time */ |
c0bd50e2 YZ |
2892 | if (mdsc->fsc->sb->s_root) |
2893 | shrink_dcache_parent(mdsc->fsc->sb->s_root); | |
5d23371f YZ |
2894 | |
2895 | ceph_con_close(&session->s_con); | |
2896 | ceph_con_open(&session->s_con, | |
2897 | CEPH_ENTITY_TYPE_MDS, mds, | |
2898 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
2899 | ||
2900 | /* replay unsafe requests */ | |
2901 | replay_unsafe_requests(mdsc, session); | |
2902 | ||
2903 | down_read(&mdsc->snap_rwsem); | |
2904 | ||
2f2dc053 | 2905 | /* traverse this session's caps */ |
44c99757 YZ |
2906 | s_nr_caps = session->s_nr_caps; |
2907 | err = ceph_pagelist_encode_32(pagelist, s_nr_caps); | |
93cea5be SW |
2908 | if (err) |
2909 | goto fail; | |
20cb34ae | 2910 | |
44c99757 | 2911 | recon_state.nr_caps = 0; |
20cb34ae SW |
2912 | recon_state.pagelist = pagelist; |
2913 | recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; | |
2914 | err = iterate_session_caps(session, encode_caps_cb, &recon_state); | |
2f2dc053 | 2915 | if (err < 0) |
9abf82b8 | 2916 | goto fail; |
2f2dc053 | 2917 | |
99a9c273 YZ |
2918 | spin_lock(&session->s_cap_lock); |
2919 | session->s_cap_reconnect = 0; | |
2920 | spin_unlock(&session->s_cap_lock); | |
2921 | ||
2f2dc053 SW |
2922 | /* |
2923 | * snaprealms. we provide mds with the ino, seq (version), and | |
2924 | * parent for all of our realms. If the mds has any newer info, | |
2925 | * it will tell us. | |
2926 | */ | |
a105f00c SW |
2927 | for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { |
2928 | struct ceph_snap_realm *realm = | |
2929 | rb_entry(p, struct ceph_snap_realm, node); | |
93cea5be | 2930 | struct ceph_mds_snaprealm_reconnect sr_rec; |
2f2dc053 SW |
2931 | |
2932 | dout(" adding snap realm %llx seq %lld parent %llx\n", | |
2933 | realm->ino, realm->seq, realm->parent_ino); | |
93cea5be SW |
2934 | sr_rec.ino = cpu_to_le64(realm->ino); |
2935 | sr_rec.seq = cpu_to_le64(realm->seq); | |
2936 | sr_rec.parent = cpu_to_le64(realm->parent_ino); | |
2937 | err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); | |
2938 | if (err) | |
2939 | goto fail; | |
2f2dc053 | 2940 | } |
2f2dc053 | 2941 | |
20cb34ae SW |
2942 | if (recon_state.flock) |
2943 | reply->hdr.version = cpu_to_le16(2); | |
44c99757 YZ |
2944 | |
2945 | /* raced with cap release? */ | |
2946 | if (s_nr_caps != recon_state.nr_caps) { | |
2947 | struct page *page = list_first_entry(&pagelist->head, | |
2948 | struct page, lru); | |
2949 | __le32 *addr = kmap_atomic(page); | |
2950 | *addr = cpu_to_le32(recon_state.nr_caps); | |
2951 | kunmap_atomic(addr); | |
ebf18f47 | 2952 | } |
44c99757 YZ |
2953 | |
2954 | reply->hdr.data_len = cpu_to_le32(pagelist->length); | |
2955 | ceph_msg_data_add_pagelist(reply, pagelist); | |
2f2dc053 SW |
2956 | ceph_con_send(&session->s_con, reply); |
2957 | ||
9abf82b8 SW |
2958 | mutex_unlock(&session->s_mutex); |
2959 | ||
2960 | mutex_lock(&mdsc->mutex); | |
2961 | __wake_requests(mdsc, &session->s_waiting); | |
2962 | mutex_unlock(&mdsc->mutex); | |
2963 | ||
2f2dc053 | 2964 | up_read(&mdsc->snap_rwsem); |
2f2dc053 SW |
2965 | return; |
2966 | ||
93cea5be | 2967 | fail: |
2f2dc053 | 2968 | ceph_msg_put(reply); |
9abf82b8 SW |
2969 | up_read(&mdsc->snap_rwsem); |
2970 | mutex_unlock(&session->s_mutex); | |
93cea5be SW |
2971 | fail_nomsg: |
2972 | ceph_pagelist_release(pagelist); | |
93cea5be | 2973 | fail_nopagelist: |
9abf82b8 | 2974 | pr_err("error %d preparing reconnect for mds%d\n", err, mds); |
9abf82b8 | 2975 | return; |
2f2dc053 SW |
2976 | } |
2977 | ||
2978 | ||
2979 | /* | |
2980 | * compare old and new mdsmaps, kicking requests | |
2981 | * and closing out old connections as necessary | |
2982 | * | |
2983 | * called under mdsc->mutex. | |
2984 | */ | |
2985 | static void check_new_map(struct ceph_mds_client *mdsc, | |
2986 | struct ceph_mdsmap *newmap, | |
2987 | struct ceph_mdsmap *oldmap) | |
2988 | { | |
2989 | int i; | |
2990 | int oldstate, newstate; | |
2991 | struct ceph_mds_session *s; | |
2992 | ||
2993 | dout("check_new_map new %u old %u\n", | |
2994 | newmap->m_epoch, oldmap->m_epoch); | |
2995 | ||
2996 | for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { | |
2997 | if (mdsc->sessions[i] == NULL) | |
2998 | continue; | |
2999 | s = mdsc->sessions[i]; | |
3000 | oldstate = ceph_mdsmap_get_state(oldmap, i); | |
3001 | newstate = ceph_mdsmap_get_state(newmap, i); | |
3002 | ||
		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
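				/*
				 * s_mutex nests outside mdsc->mutex (as
				 * elsewhere in this file), so drop and
				 * retake mdsc->mutex rather than invert
				 * the lock order here.
				 */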
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick requests on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}
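	/*
	 * Open sessions to the export targets of any mds that has gone
	 * laggy; subtrees (and our caps) may be migrating to those
	 * ranks.
	 */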
	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}


/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}

static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

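	/*
	 * the frame is a ceph_mds_lease header, a le32 name length,
	 * and then the dentry name itself
	 */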
	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

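			/* note the new expiry, and schedule the next
			 * renewal halfway through the lease period */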
			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}

void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}

/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Both @inode and @dentry are required (see the BUG_ONs below).
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}

/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
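		/* cycling s_mutex (lock then immediate unlock) just
		 * waits out any handler still holding this session */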
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}


/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
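	/* round to a whole second so periodic timers across the
	 * system can be batched, saving wakeups */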
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
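	/* renew caps once a quarter of the session timeout has
	 * elapsed, well before the mds would time the session out */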
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}

int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->cap_flush_seq = 0;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;
}

/*
 * Wait for safe replies on open mds requests. If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_fs_client *fsc = mdsc->fsc;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    fsc->client->options->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}

/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
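			/*
			 * also pin nextreq: if it is unregistered while
			 * we sleep, its rb node is cleared, and the
			 * RB_EMPTY_NODE() check below catches that and
			 * restarts the walk.
			 */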
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}

void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush, want_snap;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->cap_flush_seq;
	spin_unlock(&mdsc->cap_dirty_lock);

	down_read(&mdsc->snap_rwsem);
	want_snap = mdsc->last_snap_seq;
	up_read(&mdsc->snap_rwsem);

	dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
	     want_tid, want_flush, want_snap);

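	/* block until every write op up to want_tid is safe and the
	 * cap/snap flushes noted above have been acked */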
	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush, want_snap);
}

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) == 0;
}

/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	struct ceph_fs_client *fsc = mdsc->fsc;
	unsigned long timeout = fsc->client->options->mount_timeout * HZ;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   timeout);

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}

static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}


/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}

static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}

static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

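	/* sessions are torn down under mdsc->mutex; drop messages
	 * that raced with __unregister_session() */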
	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately. Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}


static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}

static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

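	/* the messenger apparently may already have a buffer in
	 * progress for this incoming message; reuse it if so */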
	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}

static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int check_message_signature(struct ceph_connection *con,
				   struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = sign_message,
	.check_message_signature = check_message_signature,
};

/* eof */