Commit | Line | Data |
---|---|---|
3d14c5d2 | 1 | #include <linux/ceph/ceph_debug.h> |
ba75bb98 | 2 | |
3d14c5d2 | 3 | #include <linux/module.h> |
ba75bb98 | 4 | #include <linux/types.h> |
5a0e3ad6 | 5 | #include <linux/slab.h> |
ba75bb98 SW |
6 | #include <linux/random.h> |
7 | #include <linux/sched.h> | |
8 | ||
3d14c5d2 YS |
9 | #include <linux/ceph/mon_client.h> |
10 | #include <linux/ceph/libceph.h> | |
ab434b60 | 11 | #include <linux/ceph/debugfs.h> |
3d14c5d2 | 12 | #include <linux/ceph/decode.h> |
3d14c5d2 | 13 | #include <linux/ceph/auth.h> |
ba75bb98 SW |
14 | |
15 | /* | |
16 | * Interact with Ceph monitor cluster. Handle requests for new map | |
17 | * versions, and periodically resend as needed. Also implement | |
18 | * statfs() and umount(). | |
19 | * | |
20 | * A small cluster of Ceph "monitors" is responsible for managing critical |
21 | * cluster configuration and state information. An odd number (e.g., 3, 5) | |
22 | * of cmon daemons use a modified version of the Paxos part-time parliament | |
23 | * algorithm to manage the MDS map (mds cluster membership), OSD map, and | |
24 | * list of clients who have mounted the file system. | |
25 | * | |
26 | * We maintain an open, active session with a monitor at all times in order to | |
27 | * receive timely MDSMap updates. We periodically send a keepalive byte on the | |
28 | * TCP socket to ensure we detect a failure. If the connection does break, we | |
29 | * randomly hunt for a new monitor. Once the connection is reestablished, we | |
30 | * resend any outstanding requests. | |
31 | */ | |
32 | ||
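As a hedged illustration of the lifecycle described in the comment above, a caller might drive the public entry points defined later in this file roughly as follows. This is a sketch only (not part of the original source): error handling is abbreviated, and "client" is assumed to be the struct ceph_client that embeds this mon_client, as in libceph.

static int example_mon_client_lifecycle(struct ceph_client *client)
{
	struct ceph_statfs st;
	int err;

	err = ceph_monc_init(&client->monc, client);   /* monmap, auth, msgs */
	if (err)
		return err;
	err = ceph_monc_open_session(&client->monc);   /* pick a mon, start auth */
	if (!err)
		err = ceph_monc_do_statfs(&client->monc, &st); /* sync request */
	ceph_monc_stop(&client->monc);                 /* close session, clean up */
	return err;
}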
9e32789f | 33 | static const struct ceph_connection_operations mon_con_ops; |
ba75bb98 | 34 | |
9bd2e6f8 SW |
35 | static int __validate_auth(struct ceph_mon_client *monc); |
36 | ||
ba75bb98 SW |
37 | /* |
38 | * Decode a monmap blob (e.g., during mount). | |
39 | */ | |
40 | struct ceph_monmap *ceph_monmap_decode(void *p, void *end) | |
41 | { | |
42 | struct ceph_monmap *m = NULL; | |
43 | int i, err = -EINVAL; | |
44 | struct ceph_fsid fsid; | |
45 | u32 epoch, num_mon; | |
46 | u16 version; | |
4e7a5dcd SW |
47 | u32 len; |
48 | ||
49 | ceph_decode_32_safe(&p, end, len, bad); | |
50 | ceph_decode_need(&p, end, len, bad); | |
ba75bb98 SW |
51 | |
52 | dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); | |
53 | ||
54 | ceph_decode_16_safe(&p, end, version, bad); | |
55 | ||
56 | ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); | |
57 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
c89136ea | 58 | epoch = ceph_decode_32(&p); |
ba75bb98 | 59 | |
c89136ea | 60 | num_mon = ceph_decode_32(&p); |
ba75bb98 SW |
61 | ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad); |
62 | ||
63 | if (num_mon >= CEPH_MAX_MON) | |
64 | goto bad; | |
65 | m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS); | |
66 | if (m == NULL) | |
67 | return ERR_PTR(-ENOMEM); | |
68 | m->fsid = fsid; | |
69 | m->epoch = epoch; | |
70 | m->num_mon = num_mon; | |
71 | ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); | |
63f2d211 SW |
72 | for (i = 0; i < num_mon; i++) |
73 | ceph_decode_addr(&m->mon_inst[i].addr); | |
ba75bb98 | 74 | |
ba75bb98 SW |
75 | dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, |
76 | m->num_mon); | |
77 | for (i = 0; i < m->num_mon; i++) | |
78 | dout("monmap_decode mon%d is %s\n", i, | |
3d14c5d2 | 79 | ceph_pr_addr(&m->mon_inst[i].addr.in_addr)); |
ba75bb98 SW |
80 | return m; |
81 | ||
82 | bad: | |
83 | dout("monmap_decode failed with %d\n", err); | |
84 | kfree(m); | |
85 | return ERR_PTR(err); | |
86 | } | |
87 | ||
88 | /* | |
89 | * return true if *addr is included in the monmap. | |
90 | */ | |
91 | int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) | |
92 | { | |
93 | int i; | |
94 | ||
95 | for (i = 0; i < m->num_mon; i++) | |
103e2d3a | 96 | if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0) |
ba75bb98 SW |
97 | return 1; |
98 | return 0; | |
99 | } | |
100 | ||
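A small, hypothetical usage sketch for the membership test above (the helper and peer_addr are illustrative, not from this file):

static bool example_peer_is_monitor(struct ceph_mon_client *monc,
				    struct ceph_entity_addr *peer_addr)
{
	/* true only if we have a monmap and the peer appears in it */
	return monc->monmap && ceph_monmap_contains(monc->monmap, peer_addr);
}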
5ce6e9db SW |
101 | /* |
102 | * Send an auth request. | |
103 | */ | |
104 | static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) | |
105 | { | |
106 | monc->pending_auth = 1; | |
107 | monc->m_auth->front.iov_len = len; | |
108 | monc->m_auth->hdr.front_len = cpu_to_le32(len); | |
6740a845 | 109 | ceph_msg_revoke(monc->m_auth); |
5ce6e9db | 110 | ceph_msg_get(monc->m_auth); /* keep our ref */ |
67130934 | 111 | ceph_con_send(&monc->con, monc->m_auth); |
5ce6e9db SW |
112 | } |
113 | ||
ba75bb98 SW |
114 | /* |
115 | * Close monitor session, if any. | |
116 | */ | |
117 | static void __close_session(struct ceph_mon_client *monc) | |
118 | { | |
f6a2f5be | 119 | dout("__close_session closing mon%d\n", monc->cur_mon); |
6740a845 | 120 | ceph_msg_revoke(monc->m_auth); |
4f471e4a SW |
121 | ceph_msg_revoke_incoming(monc->m_auth_reply); |
122 | ceph_msg_revoke(monc->m_subscribe); | |
123 | ceph_msg_revoke_incoming(monc->m_subscribe_ack); | |
67130934 | 124 | ceph_con_close(&monc->con); |
f6a2f5be SW |
125 | monc->cur_mon = -1; |
126 | monc->pending_auth = 0; | |
127 | ceph_auth_reset(monc->auth); | |
ba75bb98 SW |
128 | } |
129 | ||
130 | /* | |
131 | * Open a session with a (new) monitor. | |
132 | */ | |
133 | static int __open_session(struct ceph_mon_client *monc) | |
134 | { | |
135 | char r; | |
4e7a5dcd | 136 | int ret; |
ba75bb98 SW |
137 | |
138 | if (monc->cur_mon < 0) { | |
139 | get_random_bytes(&r, 1); | |
140 | monc->cur_mon = r % monc->monmap->num_mon; | |
141 | dout("open_session num=%d r=%d -> mon%d\n", | |
142 | monc->monmap->num_mon, r, monc->cur_mon); | |
143 | monc->sub_sent = 0; | |
144 | monc->sub_renew_after = jiffies; /* i.e., expired */ | |
145 | monc->want_next_osdmap = !!monc->want_next_osdmap; | |
146 | ||
20581c1f | 147 | dout("open_session mon%d opening\n", monc->cur_mon); |
67130934 | 148 | ceph_con_open(&monc->con, |
b7a9e5dd | 149 | CEPH_ENTITY_TYPE_MON, monc->cur_mon, |
ba75bb98 | 150 | &monc->monmap->mon_inst[monc->cur_mon].addr); |
4e7a5dcd SW |
151 | |
152 | /* initiate authentication handshake */ |
153 | ret = ceph_auth_build_hello(monc->auth, | |
154 | monc->m_auth->front.iov_base, | |
155 | monc->m_auth->front_max); | |
5ce6e9db | 156 | __send_prepared_auth_request(monc, ret); |
ba75bb98 SW |
157 | } else { |
158 | dout("open_session mon%d already open\n", monc->cur_mon); | |
159 | } | |
160 | return 0; | |
161 | } | |
162 | ||
163 | static bool __sub_expired(struct ceph_mon_client *monc) | |
164 | { | |
165 | return time_after_eq(jiffies, monc->sub_renew_after); | |
166 | } | |
167 | ||
168 | /* | |
169 | * Reschedule delayed work timer. | |
170 | */ | |
171 | static void __schedule_delayed(struct ceph_mon_client *monc) | |
172 | { | |
95c96174 | 173 | unsigned int delay; |
ba75bb98 | 174 | |
4e7a5dcd | 175 | if (monc->cur_mon < 0 || __sub_expired(monc)) |
ba75bb98 SW |
176 | delay = 10 * HZ; |
177 | else | |
178 | delay = 20 * HZ; | |
179 | dout("__schedule_delayed after %u\n", delay); | |
180 | schedule_delayed_work(&monc->delayed_work, delay); | |
181 | } | |
182 | ||
183 | /* | |
184 | * Send subscribe request for mdsmap and/or osdmap. | |
185 | */ | |
186 | static void __send_subscribe(struct ceph_mon_client *monc) | |
187 | { | |
188 | dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", | |
95c96174 | 189 | (unsigned int)monc->sub_sent, __sub_expired(monc), |
ba75bb98 SW |
190 | monc->want_next_osdmap); |
191 | if ((__sub_expired(monc) && !monc->sub_sent) || | |
192 | monc->want_next_osdmap == 1) { | |
240ed68e | 193 | struct ceph_msg *msg = monc->m_subscribe; |
ba75bb98 SW |
194 | struct ceph_mon_subscribe_item *i; |
195 | void *p, *end; | |
3d14c5d2 | 196 | int num; |
ba75bb98 | 197 | |
ba75bb98 | 198 | p = msg->front.iov_base; |
240ed68e | 199 | end = p + msg->front_max; |
ba75bb98 | 200 | |
3d14c5d2 YS |
201 | num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap; |
202 | ceph_encode_32(&p, num); | |
203 | ||
ba75bb98 SW |
204 | if (monc->want_next_osdmap) { |
205 | dout("__send_subscribe to 'osdmap' %u\n", | |
95c96174 | 206 | (unsigned int)monc->have_osdmap); |
ba75bb98 SW |
207 | ceph_encode_string(&p, end, "osdmap", 6); |
208 | i = p; | |
209 | i->have = cpu_to_le64(monc->have_osdmap); | |
210 | i->onetime = 1; | |
211 | p += sizeof(*i); | |
212 | monc->want_next_osdmap = 2; /* requested */ | |
ba75bb98 | 213 | } |
3d14c5d2 YS |
214 | if (monc->want_mdsmap) { |
215 | dout("__send_subscribe to 'mdsmap' %u+\n", | |
95c96174 | 216 | (unsigned int)monc->have_mdsmap); |
3d14c5d2 YS |
217 | ceph_encode_string(&p, end, "mdsmap", 6); |
218 | i = p; | |
219 | i->have = cpu_to_le64(monc->have_mdsmap); | |
220 | i->onetime = 0; | |
221 | p += sizeof(*i); | |
222 | } | |
4e7a5dcd SW |
223 | ceph_encode_string(&p, end, "monmap", 6); |
224 | i = p; | |
225 | i->have = 0; | |
226 | i->onetime = 0; | |
227 | p += sizeof(*i); | |
ba75bb98 SW |
228 | |
229 | msg->front.iov_len = p - msg->front.iov_base; | |
230 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
6740a845 | 231 | ceph_msg_revoke(msg); |
67130934 | 232 | ceph_con_send(&monc->con, ceph_msg_get(msg)); |
ba75bb98 SW |
233 | |
234 | monc->sub_sent = jiffies | 1; /* never 0 */ | |
235 | } | |
236 | } | |
237 | ||
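For orientation, the front payload assembled above has a simple shape. A rough sketch follows (not a verbatim dump; the full item layout is struct ceph_mon_subscribe_item in the shared ceph headers):

/*
 *   le32 num;                                                  1-3 entries
 *   { "osdmap", item { .have = have_osdmap, .onetime = 1 } }   if wanted
 *   { "mdsmap", item { .have = have_mdsmap, .onetime = 0 } }   if wanted
 *   { "monmap", item { .have = 0,           .onetime = 0 } }   always
 */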
238 | static void handle_subscribe_ack(struct ceph_mon_client *monc, | |
239 | struct ceph_msg *msg) | |
240 | { | |
95c96174 | 241 | unsigned int seconds; |
07bd10fb SW |
242 | struct ceph_mon_subscribe_ack *h = msg->front.iov_base; |
243 | ||
244 | if (msg->front.iov_len < sizeof(*h)) | |
245 | goto bad; | |
246 | seconds = le32_to_cpu(h->duration); | |
ba75bb98 | 247 | |
ba75bb98 SW |
248 | mutex_lock(&monc->mutex); |
249 | if (monc->hunting) { | |
250 | pr_info("mon%d %s session established\n", | |
3d14c5d2 | 251 | monc->cur_mon, |
67130934 | 252 | ceph_pr_addr(&monc->con.peer_addr.in_addr)); |
ba75bb98 SW |
253 | monc->hunting = false; |
254 | } | |
255 | dout("handle_subscribe_ack after %d seconds\n", seconds); | |
0656d11b | 256 | monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1; |
ba75bb98 SW |
257 | monc->sub_sent = 0; |
258 | mutex_unlock(&monc->mutex); | |
259 | return; | |
260 | bad: | |
261 | pr_err("got corrupt subscribe-ack msg\n"); | |
9ec7cab1 | 262 | ceph_msg_dump(msg); |
ba75bb98 SW |
263 | } |
264 | ||
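A quick worked example of the renewal arithmetic above:

/*
 * If the monitor grants duration = 300 seconds, then
 *   sub_renew_after = sub_sent + (300 >> 1) * HZ - 1
 * so the subscription is treated as due for renewal roughly 150 seconds
 * after it was sent -- half the granted lifetime, well before it expires.
 */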
265 | /* | |
266 | * Keep track of which maps we have | |
267 | */ | |
268 | int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) | |
269 | { | |
270 | mutex_lock(&monc->mutex); | |
271 | monc->have_mdsmap = got; | |
272 | mutex_unlock(&monc->mutex); | |
273 | return 0; | |
274 | } | |
3d14c5d2 | 275 | EXPORT_SYMBOL(ceph_monc_got_mdsmap); |
ba75bb98 SW |
276 | |
277 | int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) | |
278 | { | |
279 | mutex_lock(&monc->mutex); | |
280 | monc->have_osdmap = got; | |
281 | monc->want_next_osdmap = 0; | |
282 | mutex_unlock(&monc->mutex); | |
283 | return 0; | |
284 | } | |
285 | ||
286 | /* | |
287 | * Register interest in the next osdmap | |
288 | */ | |
289 | void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) | |
290 | { | |
291 | dout("request_next_osdmap have %u\n", monc->have_osdmap); | |
292 | mutex_lock(&monc->mutex); | |
293 | if (!monc->want_next_osdmap) | |
294 | monc->want_next_osdmap = 1; | |
295 | if (monc->want_next_osdmap < 2) | |
296 | __send_subscribe(monc); | |
297 | mutex_unlock(&monc->mutex); | |
298 | } | |
299 | ||
4e7a5dcd | 300 | /* |
50b885b9 | 301 | * Open a session with a monitor and arm the periodic delayed work.
4e7a5dcd SW |
302 | */ |
303 | int ceph_monc_open_session(struct ceph_mon_client *monc) | |
ba75bb98 | 304 | { |
ba75bb98 | 305 | mutex_lock(&monc->mutex); |
4e7a5dcd | 306 | __open_session(monc); |
ba75bb98 SW |
307 | __schedule_delayed(monc); |
308 | mutex_unlock(&monc->mutex); | |
309 | return 0; | |
310 | } | |
3d14c5d2 | 311 | EXPORT_SYMBOL(ceph_monc_open_session); |
ba75bb98 | 312 | |
d1c338a5 SW |
313 | /* |
314 | * We require the fsid and global_id in order to initialize our | |
315 | * debugfs dir. | |
316 | */ | |
317 | static bool have_debugfs_info(struct ceph_mon_client *monc) | |
318 | { | |
319 | dout("have_debugfs_info fsid %d globalid %lld\n", | |
320 | (int)monc->client->have_fsid, monc->auth->global_id); | |
321 | return monc->client->have_fsid && monc->auth->global_id > 0; | |
322 | } | |
323 | ||
4e7a5dcd SW |
324 | /* |
325 | * The monitor responds with a mount ack indicating mount success. The |
326 | * included client ticket allows the client to talk to MDSs and OSDs. | |
327 | */ | |
0743304d SW |
328 | static void ceph_monc_handle_map(struct ceph_mon_client *monc, |
329 | struct ceph_msg *msg) | |
4e7a5dcd SW |
330 | { |
331 | struct ceph_client *client = monc->client; | |
332 | struct ceph_monmap *monmap = NULL, *old = monc->monmap; | |
333 | void *p, *end; | |
d1c338a5 | 334 | int had_debugfs_info, init_debugfs = 0; |
4e7a5dcd SW |
335 | |
336 | mutex_lock(&monc->mutex); | |
337 | ||
d1c338a5 SW |
338 | had_debugfs_info = have_debugfs_info(monc); |
339 | ||
4e7a5dcd SW |
340 | dout("handle_monmap\n"); |
341 | p = msg->front.iov_base; | |
342 | end = p + msg->front.iov_len; | |
343 | ||
344 | monmap = ceph_monmap_decode(p, end); | |
345 | if (IS_ERR(monmap)) { | |
346 | pr_err("problem decoding monmap, %d\n", | |
347 | (int)PTR_ERR(monmap)); | |
d4a780ce | 348 | goto out; |
4e7a5dcd | 349 | } |
0743304d SW |
350 | |
351 | if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) { | |
4e7a5dcd | 352 | kfree(monmap); |
d4a780ce | 353 | goto out; |
4e7a5dcd SW |
354 | } |
355 | ||
356 | client->monc.monmap = monmap; | |
4e7a5dcd SW |
357 | kfree(old); |
358 | ||
ab434b60 SW |
359 | if (!client->have_fsid) { |
360 | client->have_fsid = true; | |
d1c338a5 SW |
361 | if (!had_debugfs_info && have_debugfs_info(monc)) { |
362 | pr_info("client%lld fsid %pU\n", | |
363 | ceph_client_id(monc->client), | |
364 | &monc->client->fsid); | |
365 | init_debugfs = 1; | |
366 | } | |
ab434b60 | 367 | mutex_unlock(&monc->mutex); |
d1c338a5 SW |
368 | |
369 | if (init_debugfs) { | |
370 | /* | |
371 | * do debugfs initialization without mutex to avoid | |
372 | * creating a locking dependency | |
373 | */ | |
374 | ceph_debugfs_client_init(monc->client); | |
375 | } | |
376 | ||
ab434b60 SW |
377 | goto out_unlocked; |
378 | } | |
d4a780ce | 379 | out: |
4e7a5dcd | 380 | mutex_unlock(&monc->mutex); |
ab434b60 | 381 | out_unlocked: |
03066f23 | 382 | wake_up_all(&client->auth_wq); |
4e7a5dcd SW |
383 | } |
384 | ||
ba75bb98 | 385 | /* |
e56fa10e | 386 | * generic requests (e.g., statfs, poolop) |
ba75bb98 | 387 | */ |
f8c76f6f | 388 | static struct ceph_mon_generic_request *__lookup_generic_req( |
85ff03f6 SW |
389 | struct ceph_mon_client *monc, u64 tid) |
390 | { | |
f8c76f6f YS |
391 | struct ceph_mon_generic_request *req; |
392 | struct rb_node *n = monc->generic_request_tree.rb_node; | |
85ff03f6 SW |
393 | |
394 | while (n) { | |
f8c76f6f | 395 | req = rb_entry(n, struct ceph_mon_generic_request, node); |
85ff03f6 SW |
396 | if (tid < req->tid) |
397 | n = n->rb_left; | |
398 | else if (tid > req->tid) | |
399 | n = n->rb_right; | |
400 | else | |
401 | return req; | |
402 | } | |
403 | return NULL; | |
404 | } | |
405 | ||
f8c76f6f YS |
406 | static void __insert_generic_request(struct ceph_mon_client *monc, |
407 | struct ceph_mon_generic_request *new) | |
85ff03f6 | 408 | { |
f8c76f6f | 409 | struct rb_node **p = &monc->generic_request_tree.rb_node; |
85ff03f6 | 410 | struct rb_node *parent = NULL; |
f8c76f6f | 411 | struct ceph_mon_generic_request *req = NULL; |
85ff03f6 SW |
412 | |
413 | while (*p) { | |
414 | parent = *p; | |
f8c76f6f | 415 | req = rb_entry(parent, struct ceph_mon_generic_request, node); |
85ff03f6 SW |
416 | if (new->tid < req->tid) |
417 | p = &(*p)->rb_left; | |
418 | else if (new->tid > req->tid) | |
419 | p = &(*p)->rb_right; | |
420 | else | |
421 | BUG(); | |
422 | } | |
423 | ||
424 | rb_link_node(&new->node, parent, p); | |
f8c76f6f | 425 | rb_insert_color(&new->node, &monc->generic_request_tree); |
85ff03f6 SW |
426 | } |
427 | ||
f8c76f6f | 428 | static void release_generic_request(struct kref *kref) |
3143edd3 | 429 | { |
f8c76f6f YS |
430 | struct ceph_mon_generic_request *req = |
431 | container_of(kref, struct ceph_mon_generic_request, kref); | |
3143edd3 SW |
432 | |
433 | if (req->reply) | |
434 | ceph_msg_put(req->reply); | |
435 | if (req->request) | |
436 | ceph_msg_put(req->request); | |
20547567 YS |
437 | |
438 | kfree(req); | |
3143edd3 SW |
439 | } |
440 | ||
f8c76f6f | 441 | static void put_generic_request(struct ceph_mon_generic_request *req) |
3143edd3 | 442 | { |
f8c76f6f | 443 | kref_put(&req->kref, release_generic_request); |
3143edd3 SW |
444 | } |
445 | ||
f8c76f6f | 446 | static void get_generic_request(struct ceph_mon_generic_request *req) |
3143edd3 SW |
447 | { |
448 | kref_get(&req->kref); | |
449 | } | |
450 | ||
f8c76f6f | 451 | static struct ceph_msg *get_generic_reply(struct ceph_connection *con, |
3143edd3 SW |
452 | struct ceph_msg_header *hdr, |
453 | int *skip) | |
454 | { | |
455 | struct ceph_mon_client *monc = con->private; | |
f8c76f6f | 456 | struct ceph_mon_generic_request *req; |
3143edd3 SW |
457 | u64 tid = le64_to_cpu(hdr->tid); |
458 | struct ceph_msg *m; | |
459 | ||
460 | mutex_lock(&monc->mutex); | |
f8c76f6f | 461 | req = __lookup_generic_req(monc, tid); |
3143edd3 | 462 | if (!req) { |
f8c76f6f | 463 | dout("get_generic_reply %lld dne\n", tid); |
3143edd3 SW |
464 | *skip = 1; |
465 | m = NULL; | |
466 | } else { | |
f8c76f6f | 467 | dout("get_generic_reply %lld got %p\n", tid, req->reply); |
1c20f2d2 | 468 | *skip = 0; |
3143edd3 SW |
469 | m = ceph_msg_get(req->reply); |
470 | /* | |
471 | * we don't need to track the connection reading into | |
472 | * this reply because we only have one open connection | |
473 | * at a time, ever. | |
474 | */ | |
475 | } | |
476 | mutex_unlock(&monc->mutex); | |
477 | return m; | |
478 | } | |
479 | ||
e56fa10e YS |
480 | static int do_generic_request(struct ceph_mon_client *monc, |
481 | struct ceph_mon_generic_request *req) | |
482 | { | |
483 | int err; | |
484 | ||
485 | /* register request */ | |
486 | mutex_lock(&monc->mutex); | |
487 | req->tid = ++monc->last_tid; | |
488 | req->request->hdr.tid = cpu_to_le64(req->tid); | |
489 | __insert_generic_request(monc, req); | |
490 | monc->num_generic_requests++; | |
67130934 | 491 | ceph_con_send(&monc->con, ceph_msg_get(req->request)); |
e56fa10e YS |
492 | mutex_unlock(&monc->mutex); |
493 | ||
494 | err = wait_for_completion_interruptible(&req->completion); | |
495 | ||
496 | mutex_lock(&monc->mutex); | |
497 | rb_erase(&req->node, &monc->generic_request_tree); | |
498 | monc->num_generic_requests--; | |
499 | mutex_unlock(&monc->mutex); | |
500 | ||
501 | if (!err) | |
502 | err = req->result; | |
503 | return err; | |
504 | } | |
505 | ||
506 | /* | |
507 | * statfs | |
508 | */ | |
ba75bb98 SW |
509 | static void handle_statfs_reply(struct ceph_mon_client *monc, |
510 | struct ceph_msg *msg) | |
511 | { | |
f8c76f6f | 512 | struct ceph_mon_generic_request *req; |
ba75bb98 | 513 | struct ceph_mon_statfs_reply *reply = msg->front.iov_base; |
3143edd3 | 514 | u64 tid = le64_to_cpu(msg->hdr.tid); |
ba75bb98 SW |
515 | |
516 | if (msg->front.iov_len != sizeof(*reply)) | |
517 | goto bad; | |
ba75bb98 SW |
518 | dout("handle_statfs_reply %p tid %llu\n", msg, tid); |
519 | ||
520 | mutex_lock(&monc->mutex); | |
f8c76f6f | 521 | req = __lookup_generic_req(monc, tid); |
ba75bb98 | 522 | if (req) { |
f8c76f6f | 523 | *(struct ceph_statfs *)req->buf = reply->st; |
ba75bb98 | 524 | req->result = 0; |
f8c76f6f | 525 | get_generic_request(req); |
ba75bb98 SW |
526 | } |
527 | mutex_unlock(&monc->mutex); | |
3143edd3 | 528 | if (req) { |
03066f23 | 529 | complete_all(&req->completion); |
f8c76f6f | 530 | put_generic_request(req); |
3143edd3 | 531 | } |
ba75bb98 SW |
532 | return; |
533 | ||
534 | bad: | |
e56fa10e | 535 | pr_err("corrupt generic reply, tid %llu\n", tid); |
9ec7cab1 | 536 | ceph_msg_dump(msg); |
ba75bb98 SW |
537 | } |
538 | ||
539 | /* | |
3143edd3 | 540 | * Do a synchronous statfs(). |
ba75bb98 | 541 | */ |
3143edd3 | 542 | int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) |
ba75bb98 | 543 | { |
f8c76f6f | 544 | struct ceph_mon_generic_request *req; |
ba75bb98 | 545 | struct ceph_mon_statfs *h; |
3143edd3 SW |
546 | int err; |
547 | ||
cffe7b6d | 548 | req = kzalloc(sizeof(*req), GFP_NOFS); |
3143edd3 SW |
549 | if (!req) |
550 | return -ENOMEM; | |
551 | ||
3143edd3 SW |
552 | kref_init(&req->kref); |
553 | req->buf = buf; | |
e56fa10e | 554 | req->buf_len = sizeof(*buf); |
3143edd3 | 555 | init_completion(&req->completion); |
ba75bb98 | 556 | |
a79832f2 | 557 | err = -ENOMEM; |
b61c2763 SW |
558 | req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, |
559 | true); | |
a79832f2 | 560 | if (!req->request) |
3143edd3 | 561 | goto out; |
b61c2763 SW |
562 | req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS, |
563 | true); | |
a79832f2 | 564 | if (!req->reply) |
3143edd3 | 565 | goto out; |
3143edd3 SW |
566 | |
567 | /* fill out request */ | |
568 | h = req->request->front.iov_base; | |
13e38c8a SW |
569 | h->monhdr.have_version = 0; |
570 | h->monhdr.session_mon = cpu_to_le16(-1); | |
571 | h->monhdr.session_mon_tid = 0; | |
ba75bb98 | 572 | h->fsid = monc->monmap->fsid; |
ba75bb98 | 573 | |
e56fa10e | 574 | err = do_generic_request(monc, req); |
ba75bb98 | 575 | |
e56fa10e YS |
576 | out: |
577 | kref_put(&req->kref, release_generic_request); | |
578 | return err; | |
579 | } | |
3d14c5d2 | 580 | EXPORT_SYMBOL(ceph_monc_do_statfs); |
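A brief usage sketch for the synchronous call above (hypothetical caller, not part of this file; the kb/kb_used field names come from struct ceph_statfs in the shared ceph headers, and the reply is copied raw, hence the little-endian conversions):

static void example_report_statfs(struct ceph_client *client)
{
	struct ceph_statfs st;
	int err = ceph_monc_do_statfs(&client->monc, &st);

	if (!err)
		pr_info("cluster: %llu KB used of %llu KB\n",
			(unsigned long long)le64_to_cpu(st.kb_used),
			(unsigned long long)le64_to_cpu(st.kb));
}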
e56fa10e YS |
581 | |
582 | /* | |
583 | * pool ops | |
584 | */ | |
585 | static int get_poolop_reply_buf(const char *src, size_t src_len, | |
586 | char *dst, size_t dst_len) | |
587 | { | |
588 | u32 buf_len; | |
589 | ||
590 | if (src_len != sizeof(u32) + dst_len) | |
591 | return -EINVAL; | |
592 | ||
593 | buf_len = le32_to_cpu(*(u32 *)src); | |
594 | if (buf_len != dst_len) | |
595 | return -EINVAL; | |
596 | ||
597 | memcpy(dst, src + sizeof(u32), dst_len); | |
598 | return 0; | |
599 | } | |
600 | ||
601 | static void handle_poolop_reply(struct ceph_mon_client *monc, | |
602 | struct ceph_msg *msg) | |
603 | { | |
604 | struct ceph_mon_generic_request *req; | |
605 | struct ceph_mon_poolop_reply *reply = msg->front.iov_base; | |
606 | u64 tid = le64_to_cpu(msg->hdr.tid); | |
607 | ||
608 | if (msg->front.iov_len < sizeof(*reply)) | |
609 | goto bad; | |
610 | dout("handle_poolop_reply %p tid %llu\n", msg, tid); | |
ba75bb98 SW |
611 | |
612 | mutex_lock(&monc->mutex); | |
e56fa10e YS |
613 | req = __lookup_generic_req(monc, tid); |
614 | if (req) { | |
615 | if (req->buf_len && | |
616 | get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply), | |
617 | msg->front.iov_len - sizeof(*reply), | |
618 | req->buf, req->buf_len) < 0) { | |
619 | mutex_unlock(&monc->mutex); | |
620 | goto bad; | |
621 | } | |
622 | req->result = le32_to_cpu(reply->reply_code); | |
623 | get_generic_request(req); | |
624 | } | |
ba75bb98 | 625 | mutex_unlock(&monc->mutex); |
e56fa10e YS |
626 | if (req) { |
627 | complete(&req->completion); | |
628 | put_generic_request(req); | |
629 | } | |
630 | return; | |
ba75bb98 | 631 | |
e56fa10e YS |
632 | bad: |
633 | pr_err("corrupt generic reply, tid %llu\n", tid); | |
634 | ceph_msg_dump(msg); | |
635 | } | |
636 | ||
637 | /* | |
638 | * Do a synchronous pool op. | |
639 | */ | |
640 | int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op, | |
641 | u32 pool, u64 snapid, | |
642 | char *buf, int len) | |
643 | { | |
644 | struct ceph_mon_generic_request *req; | |
645 | struct ceph_mon_poolop *h; | |
646 | int err; | |
647 | ||
648 | req = kzalloc(sizeof(*req), GFP_NOFS); | |
649 | if (!req) | |
650 | return -ENOMEM; | |
651 | ||
652 | kref_init(&req->kref); | |
653 | req->buf = buf; | |
654 | req->buf_len = len; | |
655 | init_completion(&req->completion); | |
656 | ||
657 | err = -ENOMEM; | |
b61c2763 SW |
658 | req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS, |
659 | true); | |
e56fa10e YS |
660 | if (!req->request) |
661 | goto out; | |
b61c2763 SW |
662 | req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS, |
663 | true); | |
e56fa10e YS |
664 | if (!req->reply) |
665 | goto out; | |
666 | ||
667 | /* fill out request */ | |
668 | req->request->hdr.version = cpu_to_le16(2); | |
669 | h = req->request->front.iov_base; | |
670 | h->monhdr.have_version = 0; | |
671 | h->monhdr.session_mon = cpu_to_le16(-1); | |
672 | h->monhdr.session_mon_tid = 0; | |
673 | h->fsid = monc->monmap->fsid; | |
674 | h->pool = cpu_to_le32(pool); | |
675 | h->op = cpu_to_le32(op); | |
676 | h->auid = 0; | |
677 | h->snapid = cpu_to_le64(snapid); | |
678 | h->name_len = 0; | |
679 | ||
680 | err = do_generic_request(monc, req); | |
3143edd3 SW |
681 | |
682 | out: | |
f8c76f6f | 683 | kref_put(&req->kref, release_generic_request); |
ba75bb98 SW |
684 | return err; |
685 | } | |
686 | ||
e56fa10e YS |
687 | int ceph_monc_create_snapid(struct ceph_mon_client *monc, |
688 | u32 pool, u64 *snapid) | |
689 | { | |
690 | return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, | |
691 | pool, 0, (char *)snapid, sizeof(*snapid)); | |
692 | ||
693 | } | |
3d14c5d2 | 694 | EXPORT_SYMBOL(ceph_monc_create_snapid); |
e56fa10e YS |
695 | |
696 | int ceph_monc_delete_snapid(struct ceph_mon_client *monc, | |
697 | u32 pool, u64 snapid) | |
698 | { | |
699 | return ceph_monc_do_poolop(monc, POOL_OP_DELETE_UNMANAGED_SNAP, |
700 | pool, snapid, 0, 0); | |
701 | ||
702 | } | |
703 | ||
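The two wrappers above form a natural pair. An illustrative caller (hypothetical helper and pool id, error handling abbreviated):

static void example_snapid_roundtrip(struct ceph_client *client, u32 pool_id)
{
	u64 snapid;

	if (ceph_monc_create_snapid(&client->monc, pool_id, &snapid))
		return;
	/* ... hand the new snapid to whoever needs it ... */
	ceph_monc_delete_snapid(&client->monc, pool_id, snapid);
}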
ba75bb98 | 704 | /* |
e56fa10e | 705 | * Resend pending generic requests. |
ba75bb98 | 706 | */ |
f8c76f6f | 707 | static void __resend_generic_request(struct ceph_mon_client *monc) |
ba75bb98 | 708 | { |
f8c76f6f | 709 | struct ceph_mon_generic_request *req; |
85ff03f6 | 710 | struct rb_node *p; |
ba75bb98 | 711 | |
f8c76f6f YS |
712 | for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) { |
713 | req = rb_entry(p, struct ceph_mon_generic_request, node); | |
6740a845 | 714 | ceph_msg_revoke(req->request); |
4f471e4a | 715 | ceph_msg_revoke_incoming(req->reply); |
67130934 | 716 | ceph_con_send(&monc->con, ceph_msg_get(req->request)); |
ba75bb98 SW |
717 | } |
718 | } | |
719 | ||
720 | /* | |
721 | * Delayed work. If we haven't mounted yet, retry. Otherwise, | |
722 | * renew/retry subscription as needed (in case it is timing out, or we | |
723 | * got an ENOMEM). And keep the monitor connection alive. | |
724 | */ | |
725 | static void delayed_work(struct work_struct *work) | |
726 | { | |
727 | struct ceph_mon_client *monc = | |
728 | container_of(work, struct ceph_mon_client, delayed_work.work); | |
729 | ||
730 | dout("monc delayed_work\n"); | |
731 | mutex_lock(&monc->mutex); | |
4e7a5dcd SW |
732 | if (monc->hunting) { |
733 | __close_session(monc); | |
734 | __open_session(monc); /* continue hunting */ | |
ba75bb98 | 735 | } else { |
67130934 | 736 | ceph_con_keepalive(&monc->con); |
9bd2e6f8 SW |
737 | |
738 | __validate_auth(monc); | |
739 | ||
4e7a5dcd SW |
740 | if (monc->auth->ops->is_authenticated(monc->auth)) |
741 | __send_subscribe(monc); | |
ba75bb98 | 742 | } |
ba75bb98 SW |
743 | __schedule_delayed(monc); |
744 | mutex_unlock(&monc->mutex); | |
745 | } | |
746 | ||
6b805185 SW |
747 | /* |
748 | * On startup, we build a temporary monmap populated with the IPs | |
749 | * provided by mount(2). | |
750 | */ | |
751 | static int build_initial_monmap(struct ceph_mon_client *monc) | |
752 | { | |
3d14c5d2 YS |
753 | struct ceph_options *opt = monc->client->options; |
754 | struct ceph_entity_addr *mon_addr = opt->mon_addr; | |
755 | int num_mon = opt->num_mon; | |
6b805185 SW |
756 | int i; |
757 | ||
758 | /* build initial monmap */ | |
759 | monc->monmap = kzalloc(sizeof(*monc->monmap) + | |
760 | num_mon*sizeof(monc->monmap->mon_inst[0]), | |
761 | GFP_KERNEL); | |
762 | if (!monc->monmap) | |
763 | return -ENOMEM; | |
764 | for (i = 0; i < num_mon; i++) { | |
765 | monc->monmap->mon_inst[i].addr = mon_addr[i]; | |
6b805185 SW |
766 | monc->monmap->mon_inst[i].addr.nonce = 0; |
767 | monc->monmap->mon_inst[i].name.type = | |
768 | CEPH_ENTITY_TYPE_MON; | |
769 | monc->monmap->mon_inst[i].name.num = cpu_to_le64(i); | |
770 | } | |
771 | monc->monmap->num_mon = num_mon; | |
4e7a5dcd | 772 | monc->have_fsid = false; |
6b805185 SW |
773 | return 0; |
774 | } | |
775 | ||
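Purely for illustration (hypothetical values, not from the original file): with two monitor addresses supplied at mount time, the temporary monmap built above ends up roughly as:

/*
 *   monc->monmap->num_mon = 2;
 *   monc->monmap->mon_inst[0].addr = <mon0 addr, nonce 0>;
 *   monc->monmap->mon_inst[0].name = { CEPH_ENTITY_TYPE_MON, 0 };
 *   monc->monmap->mon_inst[1].addr = <mon1 addr, nonce 0>;
 *   monc->monmap->mon_inst[1].name = { CEPH_ENTITY_TYPE_MON, 1 };
 *
 * fsid and epoch stay zero until a real monmap arrives in CEPH_MSG_MON_MAP
 * and ceph_monc_handle_map() swaps it in.
 */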
ba75bb98 SW |
776 | int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) |
777 | { | |
778 | int err = 0; | |
779 | ||
780 | dout("init\n"); | |
781 | memset(monc, 0, sizeof(*monc)); | |
782 | monc->client = cl; | |
783 | monc->monmap = NULL; | |
784 | mutex_init(&monc->mutex); | |
785 | ||
6b805185 SW |
786 | err = build_initial_monmap(monc); |
787 | if (err) | |
788 | goto out; | |
789 | ||
f6a2f5be | 790 | /* connection */ |
4e7a5dcd | 791 | /* authentication */ |
3d14c5d2 | 792 | monc->auth = ceph_auth_init(cl->options->name, |
8323c3aa | 793 | cl->options->key); |
49d9224c NW |
794 | if (IS_ERR(monc->auth)) { |
795 | err = PTR_ERR(monc->auth); | |
67130934 | 796 | goto out_monmap; |
49d9224c | 797 | } |
4e7a5dcd SW |
798 | monc->auth->want_keys = |
799 | CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | | |
800 | CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; | |
801 | ||
240ed68e | 802 | /* msgs */ |
a79832f2 | 803 | err = -ENOMEM; |
7c315c55 | 804 | monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK, |
34d23762 | 805 | sizeof(struct ceph_mon_subscribe_ack), |
b61c2763 | 806 | GFP_NOFS, true); |
a79832f2 | 807 | if (!monc->m_subscribe_ack) |
49d9224c | 808 | goto out_auth; |
6694d6b9 | 809 | |
b61c2763 SW |
810 | monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS, |
811 | true); | |
240ed68e SW |
812 | if (!monc->m_subscribe) |
813 | goto out_subscribe_ack; | |
814 | ||
b61c2763 SW |
815 | monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS, |
816 | true); | |
a79832f2 | 817 | if (!monc->m_auth_reply) |
240ed68e | 818 | goto out_subscribe; |
4e7a5dcd | 819 | |
b61c2763 | 820 | monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true); |
9bd2e6f8 | 821 | monc->pending_auth = 0; |
a79832f2 | 822 | if (!monc->m_auth) |
6694d6b9 | 823 | goto out_auth_reply; |
ba75bb98 | 824 | |
735a72ef SW |
825 | ceph_con_init(&monc->con, monc, &mon_con_ops, |
826 | &monc->client->msgr); | |
827 | ||
ba75bb98 | 828 | monc->cur_mon = -1; |
4e7a5dcd | 829 | monc->hunting = true; |
ba75bb98 SW |
830 | monc->sub_renew_after = jiffies; |
831 | monc->sub_sent = 0; | |
832 | ||
833 | INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); | |
f8c76f6f YS |
834 | monc->generic_request_tree = RB_ROOT; |
835 | monc->num_generic_requests = 0; | |
ba75bb98 SW |
836 | monc->last_tid = 0; |
837 | ||
838 | monc->have_mdsmap = 0; | |
839 | monc->have_osdmap = 0; | |
840 | monc->want_next_osdmap = 1; | |
4e7a5dcd SW |
841 | return 0; |
842 | ||
6694d6b9 SW |
843 | out_auth_reply: |
844 | ceph_msg_put(monc->m_auth_reply); | |
240ed68e SW |
845 | out_subscribe: |
846 | ceph_msg_put(monc->m_subscribe); | |
7c315c55 SW |
847 | out_subscribe_ack: |
848 | ceph_msg_put(monc->m_subscribe_ack); | |
49d9224c NW |
849 | out_auth: |
850 | ceph_auth_destroy(monc->auth); | |
4e7a5dcd SW |
851 | out_monmap: |
852 | kfree(monc->monmap); | |
ba75bb98 SW |
853 | out: |
854 | return err; | |
855 | } | |
3d14c5d2 | 856 | EXPORT_SYMBOL(ceph_monc_init); |
ba75bb98 SW |
857 | |
858 | void ceph_monc_stop(struct ceph_mon_client *monc) | |
859 | { | |
860 | dout("stop\n"); | |
861 | cancel_delayed_work_sync(&monc->delayed_work); | |
862 | ||
863 | mutex_lock(&monc->mutex); | |
864 | __close_session(monc); | |
f6a2f5be | 865 | |
ba75bb98 SW |
866 | mutex_unlock(&monc->mutex); |
867 | ||
f3dea7ed SW |
868 | /* |
869 | * flush msgr queue before we destroy ourselves to ensure that: | |
870 | * - any work that references our embedded con is finished. | |
871 | * - any osd_client or other work that may reference an authorizer | |
872 | * finishes before we shut down the auth subsystem. | |
873 | */ | |
874 | ceph_msgr_flush(); | |
875 | ||
4e7a5dcd SW |
876 | ceph_auth_destroy(monc->auth); |
877 | ||
878 | ceph_msg_put(monc->m_auth); | |
6694d6b9 | 879 | ceph_msg_put(monc->m_auth_reply); |
240ed68e | 880 | ceph_msg_put(monc->m_subscribe); |
7c315c55 | 881 | ceph_msg_put(monc->m_subscribe_ack); |
ba75bb98 SW |
882 | |
883 | kfree(monc->monmap); | |
884 | } | |
3d14c5d2 | 885 | EXPORT_SYMBOL(ceph_monc_stop); |
ba75bb98 | 886 | |
4e7a5dcd SW |
887 | static void handle_auth_reply(struct ceph_mon_client *monc, |
888 | struct ceph_msg *msg) | |
889 | { | |
890 | int ret; | |
09c4d6a7 | 891 | int was_auth = 0; |
d1c338a5 | 892 | int had_debugfs_info, init_debugfs = 0; |
4e7a5dcd SW |
893 | |
894 | mutex_lock(&monc->mutex); | |
d1c338a5 | 895 | had_debugfs_info = have_debugfs_info(monc); |
09c4d6a7 SW |
896 | if (monc->auth->ops) |
897 | was_auth = monc->auth->ops->is_authenticated(monc->auth); | |
9bd2e6f8 | 898 | monc->pending_auth = 0; |
4e7a5dcd SW |
899 | ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, |
900 | msg->front.iov_len, | |
901 | monc->m_auth->front.iov_base, | |
902 | monc->m_auth->front_max); | |
903 | if (ret < 0) { | |
9bd2e6f8 | 904 | monc->client->auth_err = ret; |
03066f23 | 905 | wake_up_all(&monc->client->auth_wq); |
4e7a5dcd | 906 | } else if (ret > 0) { |
9bd2e6f8 | 907 | __send_prepared_auth_request(monc, ret); |
09c4d6a7 | 908 | } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { |
4e7a5dcd | 909 | dout("authenticated, starting session\n"); |
0743304d | 910 | |
15d9882c AE |
911 | monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT; |
912 | monc->client->msgr.inst.name.num = | |
0cf5537b | 913 | cpu_to_le64(monc->auth->global_id); |
0743304d | 914 | |
4e7a5dcd | 915 | __send_subscribe(monc); |
f8c76f6f | 916 | __resend_generic_request(monc); |
4e7a5dcd | 917 | } |
d1c338a5 SW |
918 | |
919 | if (!had_debugfs_info && have_debugfs_info(monc)) { | |
920 | pr_info("client%lld fsid %pU\n", | |
921 | ceph_client_id(monc->client), | |
922 | &monc->client->fsid); | |
923 | init_debugfs = 1; | |
924 | } | |
4e7a5dcd | 925 | mutex_unlock(&monc->mutex); |
d1c338a5 SW |
926 | |
927 | if (init_debugfs) { | |
928 | /* | |
929 | * do debugfs initialization without mutex to avoid | |
930 | * creating a locking dependency | |
931 | */ | |
932 | ceph_debugfs_client_init(monc->client); | |
933 | } | |
4e7a5dcd SW |
934 | } |
935 | ||
9bd2e6f8 SW |
936 | static int __validate_auth(struct ceph_mon_client *monc) |
937 | { | |
938 | int ret; | |
939 | ||
940 | if (monc->pending_auth) | |
941 | return 0; | |
942 | ||
943 | ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, | |
944 | monc->m_auth->front_max); | |
945 | if (ret <= 0) | |
946 | return ret; /* either an error, or no need to authenticate */ | |
947 | __send_prepared_auth_request(monc, ret); | |
948 | return 0; | |
949 | } | |
950 | ||
951 | int ceph_monc_validate_auth(struct ceph_mon_client *monc) | |
952 | { | |
953 | int ret; | |
954 | ||
955 | mutex_lock(&monc->mutex); | |
956 | ret = __validate_auth(monc); | |
957 | mutex_unlock(&monc->mutex); | |
958 | return ret; | |
959 | } | |
3d14c5d2 | 960 | EXPORT_SYMBOL(ceph_monc_validate_auth); |
9bd2e6f8 | 961 | |
ba75bb98 SW |
962 | /* |
963 | * handle incoming message | |
964 | */ | |
965 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
966 | { | |
967 | struct ceph_mon_client *monc = con->private; | |
968 | int type = le16_to_cpu(msg->hdr.type); | |
969 | ||
970 | if (!monc) | |
971 | return; | |
972 | ||
973 | switch (type) { | |
4e7a5dcd SW |
974 | case CEPH_MSG_AUTH_REPLY: |
975 | handle_auth_reply(monc, msg); | |
ba75bb98 SW |
976 | break; |
977 | ||
978 | case CEPH_MSG_MON_SUBSCRIBE_ACK: | |
979 | handle_subscribe_ack(monc, msg); | |
980 | break; | |
981 | ||
982 | case CEPH_MSG_STATFS_REPLY: | |
983 | handle_statfs_reply(monc, msg); | |
984 | break; | |
985 | ||
e56fa10e YS |
986 | case CEPH_MSG_POOLOP_REPLY: |
987 | handle_poolop_reply(monc, msg); | |
988 | break; | |
989 | ||
4e7a5dcd SW |
990 | case CEPH_MSG_MON_MAP: |
991 | ceph_monc_handle_map(monc, msg); | |
992 | break; | |
993 | ||
ba75bb98 SW |
994 | case CEPH_MSG_OSD_MAP: |
995 | ceph_osdc_handle_map(&monc->client->osdc, msg); | |
996 | break; | |
997 | ||
998 | default: | |
3d14c5d2 YS |
999 | /* can the chained handler handle it? */ |
1000 | if (monc->client->extra_mon_dispatch && | |
1001 | monc->client->extra_mon_dispatch(monc->client, msg) == 0) | |
1002 | break; | |
1003 | ||
ba75bb98 SW |
1004 | pr_err("received unknown message type %d %s\n", type, |
1005 | ceph_msg_type_name(type)); | |
1006 | } | |
1007 | ceph_msg_put(msg); | |
1008 | } | |
1009 | ||
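Unknown message types fall through to the chained handler checked above. A hedged sketch of how a higher-level client could hook in (hypothetical callback name; by the convention in dispatch(), returning 0 means "handled"):

static int example_extra_mon_dispatch(struct ceph_client *client,
				      struct ceph_msg *msg)
{
	switch (le16_to_cpu(msg->hdr.type)) {
	case CEPH_MSG_MDS_MAP:
		/* ... decode and apply the new mdsmap ... */
		return 0;	/* handled */
	default:
		return -1;	/* not ours; dispatch() logs it */
	}
}

It would be registered once during client setup by assigning client->extra_mon_dispatch = example_extra_mon_dispatch;.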
1010 | /* | |
1011 | * Allocate memory for incoming message | |
1012 | */ | |
1013 | static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, | |
2450418c YS |
1014 | struct ceph_msg_header *hdr, |
1015 | int *skip) | |
ba75bb98 SW |
1016 | { |
1017 | struct ceph_mon_client *monc = con->private; | |
1018 | int type = le16_to_cpu(hdr->type); | |
2450418c | 1019 | int front_len = le32_to_cpu(hdr->front_len); |
5b3a4db3 | 1020 | struct ceph_msg *m = NULL; |
ba75bb98 | 1021 | |
2450418c | 1022 | *skip = 0; |
0547a9b3 | 1023 | |
ba75bb98 | 1024 | switch (type) { |
ba75bb98 | 1025 | case CEPH_MSG_MON_SUBSCRIBE_ACK: |
7c315c55 | 1026 | m = ceph_msg_get(monc->m_subscribe_ack); |
2450418c | 1027 | break; |
e56fa10e | 1028 | case CEPH_MSG_POOLOP_REPLY: |
ba75bb98 | 1029 | case CEPH_MSG_STATFS_REPLY: |
f8c76f6f | 1030 | return get_generic_reply(con, hdr, skip); |
4e7a5dcd | 1031 | case CEPH_MSG_AUTH_REPLY: |
6694d6b9 | 1032 | m = ceph_msg_get(monc->m_auth_reply); |
2450418c | 1033 | break; |
5b3a4db3 SW |
1034 | case CEPH_MSG_MON_MAP: |
1035 | case CEPH_MSG_MDS_MAP: | |
1036 | case CEPH_MSG_OSD_MAP: | |
b61c2763 | 1037 | m = ceph_msg_new(type, front_len, GFP_NOFS, false); |
1c20f2d2 AE |
1038 | if (!m) |
1039 | return NULL; /* ENOMEM--return skip == 0 */ | |
5b3a4db3 | 1040 | break; |
ba75bb98 | 1041 | } |
2450418c | 1042 | |
5b3a4db3 SW |
1043 | if (!m) { |
1044 | pr_info("alloc_msg unknown type %d\n", type); | |
2450418c | 1045 | *skip = 1; |
5b3a4db3 | 1046 | } |
2450418c | 1047 | return m; |
ba75bb98 SW |
1048 | } |
1049 | ||
1050 | /* | |
1051 | * If the monitor connection resets, pick a new monitor and resubmit | |
1052 | * any pending requests. | |
1053 | */ | |
1054 | static void mon_fault(struct ceph_connection *con) | |
1055 | { | |
1056 | struct ceph_mon_client *monc = con->private; | |
1057 | ||
1058 | if (!monc) | |
1059 | return; | |
1060 | ||
1061 | dout("mon_fault\n"); | |
1062 | mutex_lock(&monc->mutex); | |
1063 | if (!con->private) | |
1064 | goto out; | |
1065 | ||
f6a2f5be | 1066 | if (!monc->hunting) |
ba75bb98 SW |
1067 | pr_info("mon%d %s session lost, " |
1068 | "hunting for new mon\n", monc->cur_mon, | |
67130934 | 1069 | ceph_pr_addr(&monc->con.peer_addr.in_addr)); |
ba75bb98 SW |
1070 | |
1071 | __close_session(monc); | |
1072 | if (!monc->hunting) { | |
1073 | /* start hunting */ | |
1074 | monc->hunting = true; | |
4e7a5dcd | 1075 | __open_session(monc); |
ba75bb98 SW |
1076 | } else { |
1077 | /* already hunting, let's wait a bit */ | |
1078 | __schedule_delayed(monc); | |
1079 | } | |
1080 | out: | |
1081 | mutex_unlock(&monc->mutex); | |
1082 | } | |
1083 | ||
ec87ef43 SW |
1084 | /* |
1085 | * We can ignore refcounting on the connection struct, as all references | |
1086 | * will come from the messenger workqueue, which is drained prior to | |
1087 | * mon_client destruction. | |
1088 | */ | |
1089 | static struct ceph_connection *con_get(struct ceph_connection *con) | |
1090 | { | |
1091 | return con; | |
1092 | } | |
1093 | ||
1094 | static void con_put(struct ceph_connection *con) | |
1095 | { | |
1096 | } | |
1097 | ||
9e32789f | 1098 | static const struct ceph_connection_operations mon_con_ops = { |
ec87ef43 SW |
1099 | .get = con_get, |
1100 | .put = con_put, | |
ba75bb98 SW |
1101 | .dispatch = dispatch, |
1102 | .fault = mon_fault, | |
1103 | .alloc_msg = mon_alloc_msg, | |
ba75bb98 | 1104 | }; |