/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.1.0"

struct flush_entry {
	int type;
	region_t region;
	struct list_head list;
};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of the data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32

struct log_c {
	struct dm_target *ti;
	struct dm_dev *log_dev;
	uint32_t region_size;
	region_t region_count;
	uint64_t luid;
	char uuid[DM_UUID_LEN];

	char *usr_argv_str;
	uint32_t usr_argc;

	/*
	 * in_sync_hint gets set when doing is_remote_recovering.  It
	 * represents the first region that needs recovery.  IOW, the
	 * first zero bit of sync_bits.  This can be useful to limit
	 * traffic for calls like is_remote_recovering and get_resync_work,
	 * but take care in its use for anything else.
	 */
	uint64_t in_sync_hint;

	/*
	 * Mark and clear requests are held until a flush is issued
	 * so that we can group, and thereby limit, the amount of
	 * network traffic between kernel and userspace.  The 'flush_lock'
	 * is used to protect these lists.
	 */
	spinlock_t flush_lock;
	struct list_head mark_list;
	struct list_head clear_list;
};

static mempool_t *flush_entry_pool;

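/*
 * Allocation callbacks for flush_entry_pool.  The pool keeps a reserve
 * of entries so that userspace_mark_region() can always obtain one
 * without failing.
 */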
static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct flush_entry), gfp_mask);
}

static void flush_entry_free(void *element, void *pool_data)
{
	kfree(element);
}

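/*
 * userspace_do_request
 *
 * Wrapper for dm_consult_userspace() that retries when the userspace
 * log server is unreachable (-ESRCH): it re-creates the log via
 * DM_ULOG_CTR, resumes it, and then retries the original request.
 */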
static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{
	int r;

	/*
	 * If the server isn't there, -ESRCH is returned,
	 * and we must keep trying until the server is
	 * restored.
	 */
retry:
	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
				 data_size, rdata, rdata_size);

	if (r != -ESRCH)
		return r;

	DMERR("Userspace log server not found.");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2*HZ);
		DMWARN("Attempting to contact userspace log server...");
		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
					 lc->usr_argv_str,
					 strlen(lc->usr_argv_str) + 1,
					 NULL, NULL);
		if (!r)
			break;
	}
	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
				 0, NULL, NULL);
	if (!r)
		goto retry;

	DMERR("Error trying to resume userspace log: %d", r);

	return -ESRCH;
}

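/*
 * build_constructor_string
 *
 * Build the string handed to the userspace server on DM_ULOG_CTR:
 * "<target length> <arg1> <arg2> ...".
 *
 * Returns: length of the string on success, -ENOMEM on failure
 */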
static int build_constructor_string(struct dm_target *ti,
				    unsigned argc, char **argv,
				    char **ctr_str)
{
	int i, str_size;
	char *str = NULL;

	*ctr_str = NULL;

	for (i = 0, str_size = 0; i < argc; i++)
		str_size += strlen(argv[i]) + 1; /* +1 for space between args */

	str_size += 20; /* Max number of chars in a printed u64 number */

	str = kzalloc(str_size, GFP_KERNEL);
	if (!str) {
		DMWARN("Unable to allocate memory for constructor string");
		return -ENOMEM;
	}

	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
	for (i = 0; i < argc; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	*ctr_str = str;
	return str_size;
}

/*
 * userspace_ctr
 *
 * argv contains:
 *	<UUID> <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.  An example might be:
 *	<UUID> clustered-disk <arg count> <log dev> <region_size> [[no]sync]
 *
 * So, this module will strip off the <UUID> for identification purposes
 * when communicating with userspace about a log, but will pass on
 * everything else.
 */
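/*
 * For illustration only, a complete mirror table line selecting this
 * log type might look like the following; the argument values here are
 * hypothetical, and the exact set is defined by the userspace server
 * implementation (e.g. cmirrord):
 *
 *	0 409600 mirror userspace 3 <UUID> clustered-disk 1024 2 253:3 0 253:4 0
 */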
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			 unsigned argc, char **argv)
{
	int r = 0;
	int str_size;
	char *ctr_str = NULL;
	struct log_c *lc = NULL;
	uint64_t rdata;
	size_t rdata_size = sizeof(rdata);
	char *devices_rdata = NULL;
	size_t devices_rdata_size = DM_NAME_LEN;

	if (argc < 3) {
		DMWARN("Too few arguments to userspace dirty log");
		return -EINVAL;
	}

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc) {
		DMWARN("Unable to allocate userspace log context.");
		return -ENOMEM;
	}

	/* The ptr value is sufficient for local unique id */
	lc->luid = (unsigned long)lc;

	lc->ti = ti;

	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
		DMWARN("UUID argument too long.");
		kfree(lc);
		return -EINVAL;
	}

	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
	spin_lock_init(&lc->flush_lock);
	INIT_LIST_HEAD(&lc->mark_list);
	INIT_LIST_HEAD(&lc->clear_list);

	str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
	if (str_size < 0) {
		kfree(lc);
		return str_size;
	}

	devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
	if (!devices_rdata) {
		DMERR("Failed to allocate memory for device information");
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Send table string and get back any opened device.
	 */
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
				 ctr_str, str_size,
				 devices_rdata, &devices_rdata_size);

	if (r < 0) {
		if (r == -ESRCH)
			DMERR("Userspace log server not found");
		else
			DMERR("Userspace log server failed to create log");
		goto out;
	}

	/* Since the region size does not change, get it now */
	rdata_size = sizeof(rdata);
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
				 NULL, 0, (char *)&rdata, &rdata_size);

	if (r) {
		DMERR("Failed to get region size of dirty log");
		goto out;
	}

	lc->region_size = (uint32_t)rdata;
	lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

	if (devices_rdata_size) {
		if (devices_rdata[devices_rdata_size - 1] != '\0') {
			DMERR("DM_ULOG_CTR device return string not properly terminated");
			r = -EINVAL;
			goto out;
		}
		r = dm_get_device(ti, devices_rdata,
				  dm_table_get_mode(ti->table), &lc->log_dev);
		if (r)
			DMERR("Failed to register %s with device-mapper",
			      devices_rdata);
	}
out:
	kfree(devices_rdata);
	if (r) {
		kfree(lc);
		kfree(ctr_str);
	} else {
		lc->usr_argv_str = ctr_str;
		lc->usr_argc = argc;
		log->context = lc;
	}

	return r;
}

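/*
 * userspace_dtr
 *
 * Tell the server to destroy the log, release any device registered
 * during the constructor, and free the kernel-side context.  The
 * DM_ULOG_DTR result is deliberately ignored; nothing useful can be
 * done about a failure at this point.
 */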
static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				    NULL, 0,
				    NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	kfree(lc->usr_argv_str);
	kfree(lc);

	return;
}

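/*
 * The suspend/resume hooks simply forward the corresponding request
 * to the userspace server.  userspace_resume() also resets
 * in_sync_hint, since the sync state may have changed while the
 * device was suspended.
 */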
static int userspace_presuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
				 NULL, 0,
				 NULL, NULL);

	return r;
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0,
				 NULL, NULL);

	return r;
}

static int userspace_resume(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	lc->in_sync_hint = 0;
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
				 NULL, 0,
				 NULL, NULL);

	return r;
}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	return lc->region_size;
}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
	int r;
	uint64_t region64 = (uint64_t)region;
	int64_t is_clean;
	size_t rdata_size;
	struct log_c *lc = log->context;

	rdata_size = sizeof(is_clean);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
				 (char *)&region64, sizeof(region64),
				 (char *)&is_clean, &rdata_size);

	return (r) ? 0 : (int)is_clean;
}

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is not set, -EWOULDBLOCK is returned immediately,
 * since consulting the server may sleep.
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK if !can_block
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
			     int can_block)
{
	int r;
	uint64_t region64 = region;
	int64_t in_sync;
	size_t rdata_size;
	struct log_c *lc = log->context;

	/*
	 * We can never respond directly - even if in_sync_hint is
	 * set.  This is because another machine could see a device
	 * failure and mark the region out-of-sync.  If we don't go
	 * to userspace to ask, we might think the region is in-sync
	 * and allow a read to pick up data that is stale.  (This is
	 * very unlikely if a device actually fails; but it is very
	 * likely if a connection to one device from one machine fails.)
	 *
	 * There still might be a problem if the mirror caches the region
	 * state as in-sync... but then this call would not be made.  So,
	 * that is a mirror problem.
	 */
	if (!can_block)
		return -EWOULDBLOCK;

	rdata_size = sizeof(in_sync);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
				 (char *)&region64, sizeof(region64),
				 (char *)&in_sync, &rdata_size);
	return (r) ? 0 : (int)in_sync;
}

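/*
 * flush_one_by_one
 *
 * Fallback for flush_by_group(): send each request in 'flush_list'
 * to the server individually, stopping at the first error.
 */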
static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	struct flush_entry *fe;

	list_for_each_entry(fe, flush_list, list) {
		r = userspace_do_request(lc, lc->uuid, fe->type,
					 (char *)&fe->region,
					 sizeof(fe->region),
					 NULL, NULL);
		if (r)
			break;
	}

	return r;
}

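/*
 * flush_by_group
 *
 * Send the queued requests to the server in batches of up to
 * MAX_FLUSH_GROUP_COUNT regions.  On a batch failure, fall back to
 * flush_one_by_one().  In all cases the entries end up back on
 * 'flush_list' so the caller can free them.
 */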
static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	int count;
	uint32_t type = 0;
	struct flush_entry *fe, *tmp_fe;
	LIST_HEAD(tmp_list);
	uint64_t group[MAX_FLUSH_GROUP_COUNT];

	/*
	 * Group process the requests
	 */
	while (!list_empty(flush_list)) {
		count = 0;

		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
			group[count] = fe->region;
			count++;

			list_move(&fe->list, &tmp_list);

			type = fe->type;
			if (count >= MAX_FLUSH_GROUP_COUNT)
				break;
		}

		r = userspace_do_request(lc, lc->uuid, type,
					 (char *)(group),
					 count * sizeof(uint64_t),
					 NULL, NULL);
		if (r) {
			/* Group send failed.  Attempt one-by-one. */
			list_splice_init(&tmp_list, flush_list);
			r = flush_one_by_one(lc, flush_list);
			break;
		}
	}

	/*
	 * Must collect the flush_entry structs that were successfully
	 * processed as a group so that they will be freed by the caller.
	 */
	list_splice_init(&tmp_list, flush_list);

	return r;
}

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server - reducing the
 * load on flush.  Then the flush would have less in
 * the list and be responsible for the finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
static int userspace_flush(struct dm_dirty_log *log)
{
	int r = 0;
	unsigned long flags;
	struct log_c *lc = log->context;
	LIST_HEAD(mark_list);
	LIST_HEAD(clear_list);
	struct flush_entry *fe, *tmp_fe;

	spin_lock_irqsave(&lc->flush_lock, flags);
	list_splice_init(&lc->mark_list, &mark_list);
	list_splice_init(&lc->clear_list, &clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	if (list_empty(&mark_list) && list_empty(&clear_list))
		return 0;

	r = flush_by_group(lc, &mark_list);
	if (r)
		goto fail;

	r = flush_by_group(lc, &clear_list);
	if (r)
		goto fail;

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
				 NULL, 0, NULL, NULL);

fail:
	/*
	 * We can safely remove these entries, even on failure.
	 * The calling code will receive an error and will know
	 * that the log facility has failed.
	 */
	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}
	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}

	if (r)
		dm_table_event(lc->ti->table);

	return r;
}

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Memory allocation is valid for blocking.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct flush_entry *fe;

	/* Wait for an allocation, but _never_ fail */
	fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
	BUG_ON(!fe);

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_MARK_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->mark_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_clear_region
 *
 * This function must not block, so the allocation can't block
 * either.  In the worst case, it is ok for the allocation to
 * fail; it would simply mean we can't clear the region.  That
 * does nothing to the current sync context, but it does mean
 * the region will be re-synced on a reload of the mirror even
 * though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct flush_entry *fe;

	/*
	 * If we fail to allocate, we skip the clearing of
	 * the region.  This doesn't hurt us in any way, except
	 * to cause the region to be resync'ed when the
	 * device is activated next time.
	 */
	fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
	if (!fe) {
		DMERR("Failed to allocate memory to clear region.");
		return;
	}

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_CLEAR_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid to return
 * an error for this function.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	int r;
	size_t rdata_size;
	struct log_c *lc = log->context;
	struct {
		int64_t i; /* 64-bit for mixed-arch compatibility */
		region_t r;
	} pkg;

	if (lc->in_sync_hint >= lc->region_count)
		return 0;

	rdata_size = sizeof(pkg);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
				 NULL, 0,
				 (char *)&pkg, &rdata_size);

	*region = pkg.r;
	return (r) ? r : (int)pkg.i;
}

/*
 * userspace_set_region_sync
 *
 * Set the sync status of a given region.  This function
 * must not fail.
 */
static void userspace_set_region_sync(struct dm_dirty_log *log,
				      region_t region, int in_sync)
{
	int r;
	struct log_c *lc = log->context;
	struct {
		region_t r;
		int64_t i;
	} pkg;

	pkg.r = region;
	pkg.i = (int64_t)in_sync;

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
				 (char *)&pkg, sizeof(pkg),
				 NULL, NULL);

	/*
	 * It would be nice to be able to report failures.
	 * However, it is easy enough to detect and resolve.
	 */
	return;
}

/*
 * userspace_get_sync_count
 *
 * If there is any sort of failure when consulting the server,
 * we assume that the sync count is zero.
 *
 * Returns: sync count on success, 0 on failure
 */
static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
	int r;
	size_t rdata_size;
	uint64_t sync_count;
	struct log_c *lc = log->context;

	rdata_size = sizeof(sync_count);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
				 NULL, 0,
				 (char *)&sync_count, &rdata_size);

	if (r)
		return 0;

	if (sync_count >= lc->region_count)
		lc->in_sync_hint = lc->region_count;

	return (region_t)sync_count;
}

/*
 * userspace_status
 *
 * Returns: amount of space consumed
 */
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
			    char *result, unsigned maxlen)
{
	int r = 0;
	char *table_args;
	size_t sz = (size_t)maxlen;
	struct log_c *lc = log->context;

	switch (status_type) {
	case STATUSTYPE_INFO:
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
					 NULL, 0,
					 result, &sz);

		if (r) {
			sz = 0;
			DMEMIT("%s 1 COM_FAILURE", log->type->name);
		}
		break;
	case STATUSTYPE_TABLE:
		sz = 0;
		table_args = strchr(lc->usr_argv_str, ' ');
		BUG_ON(!table_args); /* There will always be a ' ' */
		table_args++;

		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
		       lc->uuid, table_args);
		break;
	}
	return (r) ? 0 : (int)sz;
}

/*
 * userspace_is_remote_recovering
 *
 * Queries to the server are rate-limited: between queries (HZ/4
 * apart), the region is conservatively reported as recovering.
 *
 * Returns: 1 if region recovering, 0 otherwise
 */
static int userspace_is_remote_recovering(struct dm_dirty_log *log,
					  region_t region)
{
	int r;
	uint64_t region64 = region;
	struct log_c *lc = log->context;
	static unsigned long long limit;
	struct {
		int64_t is_recovering;
		uint64_t in_sync_hint;
	} pkg;
	size_t rdata_size = sizeof(pkg);

	/*
	 * Once the mirror has been reported to be in-sync,
	 * it will never again ask for recovery work.  So,
	 * we can safely say there is not a remote machine
	 * recovering if the device is in-sync.  (in_sync_hint
	 * must be reset at resume time.)
	 */
	if (region < lc->in_sync_hint)
		return 0;
	else if (jiffies < limit)
		return 1;

	limit = jiffies + (HZ / 4);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
				 (char *)&region64, sizeof(region64),
				 (char *)&pkg, &rdata_size);
	if (r)
		return 1;

	lc->in_sync_hint = pkg.in_sync_hint;

	return (int)pkg.is_recovering;
}

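/*
 * The operations table registered with the dirty log core.  A mirror
 * table selects this module with the log type name "userspace".
 */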
static struct dm_dirty_log_type _userspace_type = {
	.name = "userspace",
	.module = THIS_MODULE,
	.ctr = userspace_ctr,
	.dtr = userspace_dtr,
	.presuspend = userspace_presuspend,
	.postsuspend = userspace_postsuspend,
	.resume = userspace_resume,
	.get_region_size = userspace_get_region_size,
	.is_clean = userspace_is_clean,
	.in_sync = userspace_in_sync,
	.flush = userspace_flush,
	.mark_region = userspace_mark_region,
	.clear_region = userspace_clear_region,
	.get_resync_work = userspace_get_resync_work,
	.set_region_sync = userspace_set_region_sync,
	.get_sync_count = userspace_get_sync_count,
	.status = userspace_status,
	.is_remote_recovering = userspace_is_remote_recovering,
};

static int __init userspace_dirty_log_init(void)
{
	int r = 0;

	flush_entry_pool = mempool_create(100, flush_entry_alloc,
					  flush_entry_free, NULL);

	if (!flush_entry_pool) {
		DMWARN("Unable to create flush_entry_pool: No memory.");
		return -ENOMEM;
	}

	r = dm_ulog_tfr_init();
	if (r) {
		DMWARN("Unable to initialize userspace log communications");
		mempool_destroy(flush_entry_pool);
		return r;
	}

	r = dm_dirty_log_type_register(&_userspace_type);
	if (r) {
		DMWARN("Couldn't register userspace dirty log type");
		dm_ulog_tfr_exit();
		mempool_destroy(flush_entry_pool);
		return r;
	}

	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
	return 0;
}

static void __exit userspace_dirty_log_exit(void)
{
	dm_dirty_log_type_unregister(&_userspace_type);
	dm_ulog_tfr_exit();
	mempool_destroy(flush_entry_pool);

	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
	return;
}

module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
MODULE_AUTHOR("Jonathan Brassow <[email protected]>");
MODULE_LICENSE("GPL");