/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/configfs.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"

#include "masklog.h"

/*
 * The first heartbeat pass had one global thread that would serialize all hb
 * callback calls. This global serializing sem should only be removed once
 * we've made sure that all callees can deal with being called concurrently
 * from multiple hb region threads.
 */
static DECLARE_RWSEM(o2hb_callback_sem);

/*
 * multiple hb threads are watching multiple regions. A node is live
 * whenever any of the threads sees activity from the node in its region.
 */
static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);

#define O2HB_DEBUG_DIR "o2hb"
#define O2HB_DEBUG_LIVENODES "livenodes"
static struct dentry *o2hb_debug_dir;
static struct dentry *o2hb_debug_livenodes;

static LIST_HEAD(o2hb_all_regions);

static struct o2hb_callback {
        struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);

#define O2HB_DEFAULT_BLOCK_BITS 9

unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;

/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and
 * it's not interesting to anyone until then anyway. */
static void o2hb_dead_threshold_set(unsigned int threshold)
{
        if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
                spin_lock(&o2hb_live_lock);
                if (list_empty(&o2hb_all_regions))
                        o2hb_dead_threshold = threshold;
                spin_unlock(&o2hb_live_lock);
        }
}
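
/*
 * Note (derived from the code below): a node's on-disk "dead window" is
 * o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS milliseconds -- the same
 * product that o2hb_prepare_block() advertises in hb_dead_ms and that
 * o2hb_check_slot() compares across nodes. All nodes must agree on it,
 * which is why the threshold is frozen while any region is active.
 */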

struct o2hb_node_event {
        struct list_head hn_item;
        enum o2hb_callback_type hn_event_type;
        struct o2nm_node *hn_node;
        int hn_node_num;
};

struct o2hb_disk_slot {
        struct o2hb_disk_heartbeat_block *ds_raw_block;
        u8 ds_node_num;
        u64 ds_last_time;
        u64 ds_last_generation;
        u16 ds_equal_samples;
        u16 ds_changed_samples;
        struct list_head ds_live_item;
};

/* each thread owns a region.. when we're asked to tear down the region
 * we ask the thread to stop, which cleans up the region */
struct o2hb_region {
        struct config_item hr_item;

        struct list_head hr_all_item;
        unsigned hr_unclean_stop:1;

        /* protected by the hr_callback_sem */
        struct task_struct *hr_task;

        unsigned int hr_blocks;
        unsigned long long hr_start_block;

        unsigned int hr_block_bits;
        unsigned int hr_block_bytes;

        unsigned int hr_slots_per_page;
        unsigned int hr_num_pages;

        struct page **hr_slot_data;
        struct block_device *hr_bdev;
        struct o2hb_disk_slot *hr_slots;

        /* let the person setting up hb wait until heartbeat has
         * reached a 'steady' state before we return. This will be
         * fixed when we have a more complete api that doesn't lead to
         * this sort of fragility. */
        atomic_t hr_steady_iterations;

        char hr_dev_name[BDEVNAME_SIZE];

        unsigned int hr_timeout_ms;

        /* randomized as the region goes up and down so that a node
         * recognizes a node going up and down in one iteration */
        u64 hr_generation;

        struct delayed_work hr_write_timeout_work;
        unsigned long hr_last_timeout_start;

        /* Used during o2hb_check_slot to hold a copy of the block
         * being checked because we temporarily have to zero out the
         * crc field. */
        struct o2hb_disk_heartbeat_block *hr_tmp_block;
};

struct o2hb_bio_wait_ctxt {
        atomic_t wc_num_reqs;
        struct completion wc_io_complete;
        int wc_error;
};

static void o2hb_write_timeout(struct work_struct *work)
{
        struct o2hb_region *reg =
                container_of(work, struct o2hb_region,
                             hr_write_timeout_work.work);

        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
             jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
        o2quo_disk_timeout();
}

static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
             O2HB_MAX_WRITE_TIMEOUT_MS);

        cancel_delayed_work(&reg->hr_write_timeout_work);
        reg->hr_last_timeout_start = jiffies;
        schedule_delayed_work(&reg->hr_write_timeout_work,
                              msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
}

static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
{
        cancel_delayed_work(&reg->hr_write_timeout_work);
        flush_scheduled_work();
}

static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
{
        atomic_set(&wc->wc_num_reqs, 1);
        init_completion(&wc->wc_io_complete);
        wc->wc_error = 0;
}

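/*
 * A note on the counting scheme above: wc_num_reqs starts at 1 rather
 * than 0. That extra reference is held by the submitter and is only
 * dropped in o2hb_wait_on_io(), so in-flight bios completing early can
 * never fire wc_io_complete before the submitter starts waiting.
 */
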
/* Used in error paths too */
static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
                                     unsigned int num)
{
        /* sadly atomic_sub_and_test() isn't available on all platforms. The
         * good news is that the fast path only completes one at a time */
        while (num--) {
                if (atomic_dec_and_test(&wc->wc_num_reqs)) {
                        BUG_ON(num > 0);
                        complete(&wc->wc_io_complete);
                }
        }
}

static void o2hb_wait_on_io(struct o2hb_region *reg,
                            struct o2hb_bio_wait_ctxt *wc)
{
        struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;

        blk_run_address_space(mapping);
        o2hb_bio_wait_dec(wc, 1);

        wait_for_completion(&wc->wc_io_complete);
}

static void o2hb_bio_end_io(struct bio *bio,
                            int error)
{
        struct o2hb_bio_wait_ctxt *wc = bio->bi_private;

        if (error) {
                mlog(ML_ERROR, "IO Error %d\n", error);
                wc->wc_error = error;
        }

        o2hb_bio_wait_dec(wc, 1);
        bio_put(bio);
}

/* Setup a Bio to cover I/O against the slots starting at *current_slot,
 * stopping short of max_slots. */
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
                                      struct o2hb_bio_wait_ctxt *wc,
                                      unsigned int *current_slot,
                                      unsigned int max_slots)
{
        int len, current_page;
        unsigned int vec_len, vec_start;
        unsigned int bits = reg->hr_block_bits;
        unsigned int spp = reg->hr_slots_per_page;
        unsigned int cs = *current_slot;
        struct bio *bio;
        struct page *page;

        /* Testing has shown this allocation to take long enough under
         * GFP_KERNEL that the local node can get fenced. It would be
         * nicest if we could pre-allocate these bios and avoid this
         * altogether. */
        bio = bio_alloc(GFP_ATOMIC, 16);
        if (!bio) {
                mlog(ML_ERROR, "Could not alloc slots BIO!\n");
                bio = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /* Must put everything in 512 byte sectors for the bio... */
        bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
        bio->bi_bdev = reg->hr_bdev;
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;

        vec_start = (cs << bits) % PAGE_CACHE_SIZE;
        while (cs < max_slots) {
                current_page = cs / spp;
                page = reg->hr_slot_data[current_page];

                vec_len = min(PAGE_CACHE_SIZE - vec_start,
                              (max_slots - cs) * (PAGE_CACHE_SIZE / spp));

                mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
                     current_page, vec_len, vec_start);

                len = bio_add_page(bio, page, vec_len, vec_start);
                if (len != vec_len)
                        break;

                cs += vec_len / (PAGE_CACHE_SIZE / spp);
                vec_start = 0;
        }

bail:
        *current_slot = cs;
        return bio;
}

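/*
 * Why the (bits - 9) shift above: heartbeat slots are hr_block_bytes
 * (1 << hr_block_bits) each, while bio sectors are fixed at 512 bytes,
 * so slot number s lives at sector (hr_start_block + s) << (bits - 9).
 * E.g. with 4096-byte blocks (bits = 12) each slot spans 8 sectors.
 */
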
static int o2hb_read_slots(struct o2hb_region *reg,
                           unsigned int max_slots)
{
        unsigned int current_slot = 0;
        int status;
        struct o2hb_bio_wait_ctxt wc;
        struct bio *bio;

        o2hb_bio_wait_init(&wc);

        while (current_slot < max_slots) {
                bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
                if (IS_ERR(bio)) {
                        status = PTR_ERR(bio);
                        mlog_errno(status);
                        goto bail_and_wait;
                }

                atomic_inc(&wc.wc_num_reqs);
                submit_bio(READ, bio);
        }

        status = 0;

bail_and_wait:
        o2hb_wait_on_io(reg, &wc);
        if (wc.wc_error && !status)
                status = wc.wc_error;

        return status;
}

static int o2hb_issue_node_write(struct o2hb_region *reg,
                                 struct o2hb_bio_wait_ctxt *write_wc)
{
        int status;
        unsigned int slot;
        struct bio *bio;

        o2hb_bio_wait_init(write_wc);

        slot = o2nm_this_node();

        bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot + 1);
        if (IS_ERR(bio)) {
                status = PTR_ERR(bio);
                mlog_errno(status);
                goto bail;
        }

        atomic_inc(&write_wc->wc_num_reqs);
        submit_bio(WRITE, bio);

        status = 0;
bail:
        return status;
}

static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
                                     struct o2hb_disk_heartbeat_block *hb_block)
{
        __le32 old_cksum;
        u32 ret;

        /* We want to compute the block crc with a 0 value in the
         * hb_cksum field. Save it off here and replace after the
         * crc. */
        old_cksum = hb_block->hb_cksum;
        hb_block->hb_cksum = 0;

        ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);

        hb_block->hb_cksum = old_cksum;

        return ret;
}

static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
        mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
             "cksum = 0x%x, generation 0x%llx\n",
             (long long)le64_to_cpu(hb_block->hb_seq),
             hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
             (long long)le64_to_cpu(hb_block->hb_generation));
}

static int o2hb_verify_crc(struct o2hb_region *reg,
                           struct o2hb_disk_heartbeat_block *hb_block)
{
        u32 read, computed;

        read = le32_to_cpu(hb_block->hb_cksum);
        computed = o2hb_compute_block_crc_le(reg, hb_block);

        return read == computed;
}

/* We want to make sure that nobody is heartbeating on top of us --
 * this will help detect an invalid configuration. */
static int o2hb_check_last_timestamp(struct o2hb_region *reg)
{
        int node_num, ret;
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;

        node_num = o2nm_this_node();

        ret = 1;
        slot = &reg->hr_slots[node_num];
        /* Don't check on our 1st timestamp */
        if (slot->ds_last_time) {
                hb_block = slot->ds_raw_block;

                if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
                        ret = 0;
        }

        return ret;
}

static inline void o2hb_prepare_block(struct o2hb_region *reg,
                                      u64 generation)
{
        int node_num;
        u64 cputime;
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;

        node_num = o2nm_this_node();
        slot = &reg->hr_slots[node_num];

        hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
        memset(hb_block, 0, reg->hr_block_bytes);
        /* TODO: time stuff */
        cputime = CURRENT_TIME.tv_sec;
        if (!cputime)
                cputime = 1;

        hb_block->hb_seq = cpu_to_le64(cputime);
        hb_block->hb_node = node_num;
        hb_block->hb_generation = cpu_to_le64(generation);
        hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);

        /* This step must always happen last! */
        hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
                                                                   hb_block));

        mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
             (long long)generation,
             le32_to_cpu(hb_block->hb_cksum));
}

static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
                                struct o2nm_node *node,
                                int idx)
{
        struct list_head *iter;
        struct o2hb_callback_func *f;

        list_for_each(iter, &hbcall->list) {
                f = list_entry(iter, struct o2hb_callback_func, hc_item);
                mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
                (f->hc_func)(node, idx, f->hc_data);
        }
}

/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
        int empty;
        struct o2hb_callback *hbcall;
        struct o2hb_node_event *event;

        spin_lock(&o2hb_live_lock);
        empty = list_empty(&queued_event->hn_item);
        spin_unlock(&o2hb_live_lock);
        if (empty)
                return;

        /* Holding callback sem assures we don't alter the callback
         * lists when doing this, and serializes ourselves with other
         * processes wanting callbacks. */
        down_write(&o2hb_callback_sem);

        spin_lock(&o2hb_live_lock);
        while (!list_empty(&o2hb_node_events)
               && !list_empty(&queued_event->hn_item)) {
                event = list_entry(o2hb_node_events.next,
                                   struct o2hb_node_event,
                                   hn_item);
                list_del_init(&event->hn_item);
                spin_unlock(&o2hb_live_lock);

                mlog(ML_HEARTBEAT, "Node %s event for %d\n",
                     event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
                     event->hn_node_num);

                hbcall = hbcall_from_type(event->hn_event_type);

                /* We should *never* have gotten on to the list with a
                 * bad type... This isn't something that we should try
                 * to recover from. */
                BUG_ON(IS_ERR(hbcall));

                o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);

                spin_lock(&o2hb_live_lock);
        }
        spin_unlock(&o2hb_live_lock);

        up_write(&o2hb_callback_sem);
}

static void o2hb_queue_node_event(struct o2hb_node_event *event,
                                  enum o2hb_callback_type type,
                                  struct o2nm_node *node,
                                  int node_num)
{
        assert_spin_locked(&o2hb_live_lock);

        event->hn_event_type = type;
        event->hn_node = node;
        event->hn_node_num = node_num;

        mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
             type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);

        list_add_tail(&event->hn_item, &o2hb_node_events);
}

static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
        struct o2hb_node_event event =
                { .hn_item = LIST_HEAD_INIT(event.hn_item), };
        struct o2nm_node *node;

        node = o2nm_get_node_by_num(slot->ds_node_num);
        if (!node)
                return;

        spin_lock(&o2hb_live_lock);
        if (!list_empty(&slot->ds_live_item)) {
                mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
                     slot->ds_node_num);

                list_del_init(&slot->ds_live_item);

                if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
                        clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

                        o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
                                              slot->ds_node_num);
                }
        }
        spin_unlock(&o2hb_live_lock);

        o2hb_run_event_list(&event);

        o2nm_node_put(node);
}

static int o2hb_check_slot(struct o2hb_region *reg,
                           struct o2hb_disk_slot *slot)
{
        int changed = 0, gen_changed = 0;
        struct o2hb_node_event event =
                { .hn_item = LIST_HEAD_INIT(event.hn_item), };
        struct o2nm_node *node;
        struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
        u64 cputime;
        unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
        unsigned int slot_dead_ms;

        memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);

        /* Is this correct? Do we assume that the node doesn't exist
         * if we're not configured for it? */
        node = o2nm_get_node_by_num(slot->ds_node_num);
        if (!node)
                return 0;

        if (!o2hb_verify_crc(reg, hb_block)) {
                /* all paths from here will drop o2hb_live_lock for
                 * us. */
                spin_lock(&o2hb_live_lock);

                /* Don't print an error on the console in this case -
                 * a freshly formatted heartbeat area will not have a
                 * crc set on it. */
                if (list_empty(&slot->ds_live_item))
                        goto out;

                /* The node is live but pushed out a bad crc. We
                 * consider it a transient miss but don't populate any
                 * other values as they may be junk. */
                mlog(ML_ERROR, "Node %d has written a bad crc to %s\n",
                     slot->ds_node_num, reg->hr_dev_name);
                o2hb_dump_slot(hb_block);

                slot->ds_equal_samples++;
                goto fire_callbacks;
        }

        /* we don't care if these wrap.. the state transitions below
         * clear at the right places */
        cputime = le64_to_cpu(hb_block->hb_seq);
        if (slot->ds_last_time != cputime)
                slot->ds_changed_samples++;
        else
                slot->ds_equal_samples++;
        slot->ds_last_time = cputime;

        /* The node changed heartbeat generations. We assume this to
         * mean it dropped off but came back before we timed out. We
         * want to consider it down for the time being but don't want
         * to lose any changed_samples state we might build up to
         * considering it live again. */
        if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
                gen_changed = 1;
                slot->ds_equal_samples = 0;
                mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
                     "to 0x%llx)\n", slot->ds_node_num,
                     (long long)slot->ds_last_generation,
                     (long long)le64_to_cpu(hb_block->hb_generation));
        }

        slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);

        mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
             "seq %llu last %llu changed %u equal %u\n",
             slot->ds_node_num, (long long)slot->ds_last_generation,
             le32_to_cpu(hb_block->hb_cksum),
             (unsigned long long)le64_to_cpu(hb_block->hb_seq),
             (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
             slot->ds_equal_samples);

        spin_lock(&o2hb_live_lock);

fire_callbacks:
        /* dead nodes only come to life after some number of
         * changes at any time during their dead time */
        if (list_empty(&slot->ds_live_item) &&
            slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
                mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
                     slot->ds_node_num, (long long)slot->ds_last_generation);

                /* first on the list generates a callback */
                if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
                        set_bit(slot->ds_node_num, o2hb_live_node_bitmap);

                        o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
                                              slot->ds_node_num);

                        changed = 1;
                }

                list_add_tail(&slot->ds_live_item,
                              &o2hb_live_slots[slot->ds_node_num]);

                slot->ds_equal_samples = 0;

                /* We want to be sure that all nodes agree on the
                 * number of milliseconds before a node will be
                 * considered dead. The self-fencing timeout is
                 * computed from this value, and a discrepancy might
                 * result in heartbeat calling a node dead when it
                 * hasn't self-fenced yet. */
                slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
                if (slot_dead_ms && slot_dead_ms != dead_ms) {
                        /* TODO: Perhaps we can fail the region here. */
                        mlog(ML_ERROR, "Node %d on device %s has a dead count "
                             "of %u ms, but our count is %u ms.\n"
                             "Please double check your configuration values "
                             "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
                             slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
                             dead_ms);
                }
                goto out;
        }

        /* if the list is dead, we're done.. */
        if (list_empty(&slot->ds_live_item))
                goto out;

        /* live nodes only go dead after enough consecutive missed
         * samples.. reset the missed counter whenever we see
         * activity */
        if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
                mlog(ML_HEARTBEAT, "Node %d left my region\n",
                     slot->ds_node_num);

                /* last off the live_slot generates a callback */
                list_del_init(&slot->ds_live_item);
                if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
                        clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

                        o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
                                              slot->ds_node_num);

                        changed = 1;
                }

                /* We don't clear this because the node is still
                 * actually writing new blocks. */
                if (!gen_changed)
                        slot->ds_changed_samples = 0;
                goto out;
        }
        if (slot->ds_changed_samples) {
                slot->ds_changed_samples = 0;
                slot->ds_equal_samples = 0;
        }
out:
        spin_unlock(&o2hb_live_lock);

        o2hb_run_event_list(&event);

        o2nm_node_put(node);
        return changed;
}

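/*
 * Summary of the per-slot state machine above: a dead slot becomes live
 * once O2HB_LIVE_THRESHOLD samples have shown a changing hb_seq; a live
 * slot is declared dead after o2hb_dead_threshold samples with no change
 * (or immediately on a generation change, which means the node bounced
 * within the window).
 */
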
/* This could be faster if we just implemented a find_last_bit, but I
 * don't think the circumstances warrant it. */
static int o2hb_highest_node(unsigned long *nodes,
                             int numbits)
{
        int highest, node;

        highest = numbits;
        node = -1;
        while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
                if (node >= numbits)
                        break;

                highest = node;
        }

        return highest;
}

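/*
 * Note that when no bits are set, find_next_bit() returns numbits, so
 * o2hb_highest_node() hands back numbits unchanged -- the caller below
 * checks for >= O2NM_MAX_NODES to detect the "no configured nodes" case.
 */
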
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
        int i, ret, highest_node, change = 0;
        unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct o2hb_bio_wait_ctxt write_wc;

        ret = o2nm_configured_node_map(configured_nodes,
                                       sizeof(configured_nodes));
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
        if (highest_node >= O2NM_MAX_NODES) {
                mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
                return -EINVAL;
        }

        /* No sense in reading the slots of nodes that don't exist
         * yet. Of course, if the node definitions have holes in them
         * then we're reading an empty slot anyway... Consider this
         * best-effort. */
        ret = o2hb_read_slots(reg, highest_node + 1);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        /* With an up to date view of the slots, we can check that no
         * other node has been improperly configured to heartbeat in
         * our slot. */
        if (!o2hb_check_last_timestamp(reg))
                mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
                     "in our slot!\n", reg->hr_dev_name);

        /* fill in the proper info for our next heartbeat */
        o2hb_prepare_block(reg, reg->hr_generation);

        /* And fire off the write. Note that we don't wait on this I/O
         * until later. */
        ret = o2hb_issue_node_write(reg, &write_wc);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        i = -1;
        while ((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
                change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
        }

        /*
         * We have to be sure we've advertised ourselves on disk
         * before we can go to steady state. This ensures that
         * people we find in our steady state have seen us.
         */
        o2hb_wait_on_io(reg, &write_wc);
        if (write_wc.wc_error) {
                /* Do not re-arm the write timeout on I/O error - we
                 * can't be sure that the new block ever made it to
                 * disk */
                mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
                     write_wc.wc_error, reg->hr_dev_name);
                return write_wc.wc_error;
        }

        o2hb_arm_write_timeout(reg);

        /* let the person who launched us know when things are steady */
        if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
                if (atomic_dec_and_test(&reg->hr_steady_iterations))
                        wake_up(&o2hb_steady_queue);
        }

        return 0;
}

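/*
 * One heartbeat iteration, then: read every configured slot, make sure
 * nobody else is writing to ours, queue the write of our own block, run
 * the liveness check on each slot while that write is in flight, and
 * only re-arm the write timeout once our block is known to be on disk.
 */
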
/* Subtract b from a, storing the result in a. If a is earlier than b,
 * the result is clamped to zero rather than going negative. */
static void o2hb_tv_subtract(struct timeval *a,
                             struct timeval *b)
{
        /* just return 0 when b is after a */
        if (a->tv_sec < b->tv_sec ||
            (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
                a->tv_sec = 0;
                a->tv_usec = 0;
                return;
        }

        a->tv_sec -= b->tv_sec;
        a->tv_usec -= b->tv_usec;
        while (a->tv_usec < 0) {
                a->tv_sec--;
                a->tv_usec += 1000000;
        }
}

static unsigned int o2hb_elapsed_msecs(struct timeval *start,
                                       struct timeval *end)
{
        struct timeval res = *end;

        o2hb_tv_subtract(&res, start);

        return res.tv_sec * 1000 + res.tv_usec / 1000;
}

/*
 * we ride the region ref that the region dir holds. before the region
 * dir is removed and drops its ref it will wait to tear down this
 * thread.
 */
static int o2hb_thread(void *data)
{
        int i, ret;
        struct o2hb_region *reg = data;
        struct o2hb_bio_wait_ctxt write_wc;
        struct timeval before_hb, after_hb;
        unsigned int elapsed_msec;

        mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");

        set_user_nice(current, -20);

        while (!kthread_should_stop() && !reg->hr_unclean_stop) {
                /* We track the time spent inside
                 * o2hb_do_disk_heartbeat so that we avoid more than
                 * hr_timeout_ms between disk writes. On busy systems
                 * this should result in a heartbeat which is less
                 * likely to time itself out. */
                do_gettimeofday(&before_hb);

                i = 0;
                do {
                        ret = o2hb_do_disk_heartbeat(reg);
                } while (ret && ++i < 2);

                do_gettimeofday(&after_hb);
                elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);

                mlog(ML_HEARTBEAT,
                     "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
                     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
                     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
                     elapsed_msec);

                if (elapsed_msec < reg->hr_timeout_ms) {
                        /* the kthread api has blocked signals for us so no
                         * need to record the return value. */
                        msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
                }
        }

        o2hb_disarm_write_timeout(reg);

        /* unclean stop is only used in very bad situations */
        for (i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
                o2hb_shutdown_slot(&reg->hr_slots[i]);

        /* Explicit down notification - avoid forcing the other nodes
         * to timeout on this region when we could just as easily
         * write a clear generation - thus indicating to them that
         * this node has left this region.
         *
         * XXX: Should we skip this on unclean_stop? */
        o2hb_prepare_block(reg, 0);
        ret = o2hb_issue_node_write(reg, &write_wc);
        if (ret == 0) {
                o2hb_wait_on_io(reg, &write_wc);
        } else {
                mlog_errno(ret);
        }

        mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");

        return 0;
}

#ifdef CONFIG_DEBUG_FS
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
        unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        char *buf = NULL;
        int i = -1;
        int out = 0;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                goto bail;

        o2hb_fill_node_map(map, sizeof(map));

        while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
                out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
        out += snprintf(buf + out, PAGE_SIZE - out, "\n");

        i_size_write(inode, out);

        file->private_data = buf;

        return 0;
bail:
        return -ENOMEM;
}

static int o2hb_debug_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
                               size_t nbytes, loff_t *ppos)
{
        return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
                                       i_size_read(file->f_mapping->host));
}
#else
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
        return 0;
}
static int o2hb_debug_release(struct inode *inode, struct file *file)
{
        return 0;
}
static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
                               size_t nbytes, loff_t *ppos)
{
        return 0;
}
#endif  /* CONFIG_DEBUG_FS */

static const struct file_operations o2hb_debug_fops = {
        .open = o2hb_debug_open,
        .release = o2hb_debug_release,
        .read = o2hb_debug_read,
        .llseek = generic_file_llseek,
};

void o2hb_exit(void)
{
        if (o2hb_debug_livenodes)
                debugfs_remove(o2hb_debug_livenodes);
        if (o2hb_debug_dir)
                debugfs_remove(o2hb_debug_dir);
}

int o2hb_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
                INIT_LIST_HEAD(&o2hb_callbacks[i].list);

        for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
                INIT_LIST_HEAD(&o2hb_live_slots[i]);

        INIT_LIST_HEAD(&o2hb_node_events);

        memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));

        o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);
        if (!o2hb_debug_dir) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        o2hb_debug_livenodes = debugfs_create_file(O2HB_DEBUG_LIVENODES,
                                                   S_IFREG|S_IRUSR,
                                                   o2hb_debug_dir, NULL,
                                                   &o2hb_debug_fops);
        if (!o2hb_debug_livenodes) {
                mlog_errno(-ENOMEM);
                debugfs_remove(o2hb_debug_dir);
                return -ENOMEM;
        }

        return 0;
}

/* if we're already in a callback then we're already serialized by the sem */
static void o2hb_fill_node_map_from_callback(unsigned long *map,
                                             unsigned bytes)
{
        BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));

        memcpy(map, &o2hb_live_node_bitmap, bytes);
}

/*
 * get a map of all nodes that are heartbeating in any regions
 */
void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
        /* callers want to serialize this map and callbacks so that they
         * can trust that they don't miss nodes coming to the party */
        down_read(&o2hb_callback_sem);
        spin_lock(&o2hb_live_lock);
        o2hb_fill_node_map_from_callback(map, bytes);
        spin_unlock(&o2hb_live_lock);
        up_read(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_fill_node_map);

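/*
 * A sketch of how a caller typically consumes the exported map (the
 * node_num check here is illustrative, not taken from this file):
 *
 *	unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *	o2hb_fill_node_map(live_map, sizeof(live_map));
 *	if (test_bit(node_num, live_map))
 *		... node_num is heartbeating in some region ...
 */
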
/*
 * heartbeat configfs bits. The heartbeat set is a default set under
 * the cluster set in nodemanager.c.
 */

static struct o2hb_region *to_o2hb_region(struct config_item *item)
{
        return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}

/* drop_item only drops its ref after killing the thread, nothing should
 * be using the region anymore. this has to clean up any state that
 * attributes might have built up. */
static void o2hb_region_release(struct config_item *item)
{
        int i;
        struct page *page;
        struct o2hb_region *reg = to_o2hb_region(item);

        if (reg->hr_tmp_block)
                kfree(reg->hr_tmp_block);

        if (reg->hr_slot_data) {
                for (i = 0; i < reg->hr_num_pages; i++) {
                        page = reg->hr_slot_data[i];
                        if (page)
                                __free_page(page);
                }
                kfree(reg->hr_slot_data);
        }

        if (reg->hr_bdev)
                blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);

        if (reg->hr_slots)
                kfree(reg->hr_slots);

        spin_lock(&o2hb_live_lock);
        list_del(&reg->hr_all_item);
        spin_unlock(&o2hb_live_lock);

        kfree(reg);
}

static int o2hb_read_block_input(struct o2hb_region *reg,
                                 const char *page,
                                 size_t count,
                                 unsigned long *ret_bytes,
                                 unsigned int *ret_bits)
{
        unsigned long bytes;
        char *p = (char *)page;

        bytes = simple_strtoul(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        /* Heartbeat and fs min / max block sizes are the same. */
        if (bytes > 4096 || bytes < 512)
                return -ERANGE;
        if (hweight16(bytes) != 1)
                return -EINVAL;

        if (ret_bytes)
                *ret_bytes = bytes;
        if (ret_bits)
                *ret_bits = ffs(bytes) - 1;

        return 0;
}

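/*
 * In other words, a block size is accepted only if it is a power of two
 * between 512 and 4096 bytes (hweight16() == 1 checks for a single set
 * bit), and ret_bits comes back as its log2: 512 -> 9, 4096 -> 12.
 */
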
static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg,
                                            char *page)
{
        return sprintf(page, "%u\n", reg->hr_block_bytes);
}

static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg,
                                             const char *page,
                                             size_t count)
{
        int status;
        unsigned long block_bytes;
        unsigned int block_bits;

        if (reg->hr_bdev)
                return -EINVAL;

        status = o2hb_read_block_input(reg, page, count,
                                       &block_bytes, &block_bits);
        if (status)
                return status;

        reg->hr_block_bytes = (unsigned int)block_bytes;
        reg->hr_block_bits = block_bits;

        return count;
}

static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg,
                                            char *page)
{
        return sprintf(page, "%llu\n", reg->hr_start_block);
}

static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg,
                                             const char *page,
                                             size_t count)
{
        unsigned long long tmp;
        char *p = (char *)page;

        if (reg->hr_bdev)
                return -EINVAL;

        tmp = simple_strtoull(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        reg->hr_start_block = tmp;

        return count;
}

static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg,
                                       char *page)
{
        return sprintf(page, "%d\n", reg->hr_blocks);
}

static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg,
                                        const char *page,
                                        size_t count)
{
        unsigned long tmp;
        char *p = (char *)page;

        if (reg->hr_bdev)
                return -EINVAL;

        tmp = simple_strtoul(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        if (tmp > O2NM_MAX_NODES || tmp == 0)
                return -ERANGE;

        reg->hr_blocks = (unsigned int)tmp;

        return count;
}

static ssize_t o2hb_region_dev_read(struct o2hb_region *reg,
                                    char *page)
{
        unsigned int ret = 0;

        if (reg->hr_bdev)
                ret = sprintf(page, "%s\n", reg->hr_dev_name);

        return ret;
}

static void o2hb_init_region_params(struct o2hb_region *reg)
{
        reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
        reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;

        mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
             reg->hr_start_block, reg->hr_blocks);
        mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
             reg->hr_block_bytes, reg->hr_block_bits);
        mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
        mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
}

static int o2hb_map_slot_data(struct o2hb_region *reg)
{
        int i, j;
        unsigned int last_slot;
        unsigned int spp = reg->hr_slots_per_page;
        struct page *page;
        char *raw;
        struct o2hb_disk_slot *slot;

        reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
        if (reg->hr_tmp_block == NULL) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        reg->hr_slots = kcalloc(reg->hr_blocks,
                                sizeof(struct o2hb_disk_slot), GFP_KERNEL);
        if (reg->hr_slots == NULL) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        for (i = 0; i < reg->hr_blocks; i++) {
                slot = &reg->hr_slots[i];
                slot->ds_node_num = i;
                INIT_LIST_HEAD(&slot->ds_live_item);
                slot->ds_raw_block = NULL;
        }

        reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
        mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
             "at %u blocks per page\n",
             reg->hr_num_pages, reg->hr_blocks, spp);

        reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
                                    GFP_KERNEL);
        if (!reg->hr_slot_data) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        for (i = 0; i < reg->hr_num_pages; i++) {
                page = alloc_page(GFP_KERNEL);
                if (!page) {
                        mlog_errno(-ENOMEM);
                        return -ENOMEM;
                }

                reg->hr_slot_data[i] = page;

                last_slot = i * spp;
                raw = page_address(page);
                for (j = 0;
                     (j < spp) && ((j + last_slot) < reg->hr_blocks);
                     j++) {
                        BUG_ON((j + last_slot) >= reg->hr_blocks);

                        slot = &reg->hr_slots[j + last_slot];
                        slot->ds_raw_block =
                                (struct o2hb_disk_heartbeat_block *) raw;

                        raw += reg->hr_block_bytes;
                }
        }

        return 0;
}

/* Read in all the slots available and populate the tracking
 * structures so that we can start with a baseline idea of what's
 * there. */
static int o2hb_populate_slot_data(struct o2hb_region *reg)
{
        int ret, i;
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;

        mlog_entry_void();

        ret = o2hb_read_slots(reg, reg->hr_blocks);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /* We only want to get an idea of the values initially in each
         * slot, so we do no verification - o2hb_check_slot will
         * actually determine if each configured slot is valid and
         * whether any values have changed. */
        for (i = 0; i < reg->hr_blocks; i++) {
                slot = &reg->hr_slots[i];
                hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;

                /* Only fill the values that o2hb_check_slot uses to
                 * determine changing slots */
                slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
                slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
        }

out:
        mlog_exit(ret);
        return ret;
}

/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                                     const char *page,
                                     size_t count)
{
        struct task_struct *hb_task;
        long fd;
        int sectsize;
        char *p = (char *)page;
        struct file *filp = NULL;
        struct inode *inode = NULL;
        ssize_t ret = -EINVAL;

        if (reg->hr_bdev)
                goto out;

        /* We can't heartbeat without having had our node number
         * configured yet. */
        if (o2nm_this_node() == O2NM_MAX_NODES)
                goto out;

        fd = simple_strtol(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                goto out;

        if (fd < 0 || fd >= INT_MAX)
                goto out;

        filp = fget(fd);
        if (filp == NULL)
                goto out;

        if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
            reg->hr_block_bytes == 0)
                goto out;

        inode = igrab(filp->f_mapping->host);
        if (inode == NULL)
                goto out;

        if (!S_ISBLK(inode->i_mode))
                goto out;

        reg->hr_bdev = I_BDEV(filp->f_mapping->host);
        ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
        if (ret) {
                reg->hr_bdev = NULL;
                goto out;
        }
        inode = NULL;

        bdevname(reg->hr_bdev, reg->hr_dev_name);

        sectsize = bdev_logical_block_size(reg->hr_bdev);
        if (sectsize != reg->hr_block_bytes) {
                mlog(ML_ERROR,
                     "blocksize %u incorrect for device, expected %d",
                     reg->hr_block_bytes, sectsize);
                ret = -EINVAL;
                goto out;
        }

        o2hb_init_region_params(reg);

        /* Generation of zero is invalid */
        do {
                get_random_bytes(&reg->hr_generation,
                                 sizeof(reg->hr_generation));
        } while (reg->hr_generation == 0);

        ret = o2hb_map_slot_data(reg);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = o2hb_populate_slot_data(reg);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);

        /*
         * A node is considered live after it has beat LIVE_THRESHOLD
         * times. We're not steady until we've given them a chance
         * _after_ our first read.
         */
        atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);

        hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
                              reg->hr_item.ci_name);
        if (IS_ERR(hb_task)) {
                ret = PTR_ERR(hb_task);
                mlog_errno(ret);
                goto out;
        }

        spin_lock(&o2hb_live_lock);
        reg->hr_task = hb_task;
        spin_unlock(&o2hb_live_lock);

        ret = wait_event_interruptible(o2hb_steady_queue,
                                atomic_read(&reg->hr_steady_iterations) == 0);
        if (ret) {
                /* We got interrupted (hello ptrace!). Clean up */
                spin_lock(&o2hb_live_lock);
                hb_task = reg->hr_task;
                reg->hr_task = NULL;
                spin_unlock(&o2hb_live_lock);

                if (hb_task)
                        kthread_stop(hb_task);
                goto out;
        }

        /* Ok, we were woken. Make sure it wasn't by drop_item() */
        spin_lock(&o2hb_live_lock);
        hb_task = reg->hr_task;
        spin_unlock(&o2hb_live_lock);

        if (hb_task)
                ret = count;
        else
                ret = -EIO;

out:
        if (filp)
                fput(filp);
        if (inode)
                iput(inode);
        if (ret < 0) {
                if (reg->hr_bdev) {
                        blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);
                        reg->hr_bdev = NULL;
                }
        }
        return ret;
}

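/*
 * To recap the commit protocol implemented above: userspace configures
 * block_bytes, start_block and blocks first, then writes an open file
 * descriptor for the block device to this "dev" attribute. Only that
 * final write maps the slots, starts the o2hb thread and blocks until
 * the region is steady -- or tears everything back down on failure.
 */
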
static ssize_t o2hb_region_pid_read(struct o2hb_region *reg,
                                    char *page)
{
        pid_t pid = 0;

        spin_lock(&o2hb_live_lock);
        if (reg->hr_task)
                pid = task_pid_nr(reg->hr_task);
        spin_unlock(&o2hb_live_lock);

        if (!pid)
                return 0;

        return sprintf(page, "%u\n", pid);
}

struct o2hb_region_attribute {
        struct configfs_attribute attr;
        ssize_t (*show)(struct o2hb_region *, char *);
        ssize_t (*store)(struct o2hb_region *, const char *, size_t);
};

static struct o2hb_region_attribute o2hb_region_attr_block_bytes = {
        .attr = { .ca_owner = THIS_MODULE,
                  .ca_name = "block_bytes",
                  .ca_mode = S_IRUGO | S_IWUSR },
        .show = o2hb_region_block_bytes_read,
        .store = o2hb_region_block_bytes_write,
};

static struct o2hb_region_attribute o2hb_region_attr_start_block = {
        .attr = { .ca_owner = THIS_MODULE,
                  .ca_name = "start_block",
                  .ca_mode = S_IRUGO | S_IWUSR },
        .show = o2hb_region_start_block_read,
        .store = o2hb_region_start_block_write,
};

static struct o2hb_region_attribute o2hb_region_attr_blocks = {
        .attr = { .ca_owner = THIS_MODULE,
                  .ca_name = "blocks",
                  .ca_mode = S_IRUGO | S_IWUSR },
        .show = o2hb_region_blocks_read,
        .store = o2hb_region_blocks_write,
};

static struct o2hb_region_attribute o2hb_region_attr_dev = {
        .attr = { .ca_owner = THIS_MODULE,
                  .ca_name = "dev",
                  .ca_mode = S_IRUGO | S_IWUSR },
        .show = o2hb_region_dev_read,
        .store = o2hb_region_dev_write,
};

static struct o2hb_region_attribute o2hb_region_attr_pid = {
        .attr = { .ca_owner = THIS_MODULE,
                  .ca_name = "pid",
                  .ca_mode = S_IRUGO | S_IRUSR },
        .show = o2hb_region_pid_read,
};

static struct configfs_attribute *o2hb_region_attrs[] = {
        &o2hb_region_attr_block_bytes.attr,
        &o2hb_region_attr_start_block.attr,
        &o2hb_region_attr_blocks.attr,
        &o2hb_region_attr_dev.attr,
        &o2hb_region_attr_pid.attr,
        NULL,
};

static ssize_t o2hb_region_show(struct config_item *item,
                                struct configfs_attribute *attr,
                                char *page)
{
        struct o2hb_region *reg = to_o2hb_region(item);
        struct o2hb_region_attribute *o2hb_region_attr =
                container_of(attr, struct o2hb_region_attribute, attr);
        ssize_t ret = 0;

        if (o2hb_region_attr->show)
                ret = o2hb_region_attr->show(reg, page);
        return ret;
}

static ssize_t o2hb_region_store(struct config_item *item,
                                 struct configfs_attribute *attr,
                                 const char *page, size_t count)
{
        struct o2hb_region *reg = to_o2hb_region(item);
        struct o2hb_region_attribute *o2hb_region_attr =
                container_of(attr, struct o2hb_region_attribute, attr);
        ssize_t ret = -EINVAL;

        if (o2hb_region_attr->store)
                ret = o2hb_region_attr->store(reg, page, count);
        return ret;
}

static struct configfs_item_operations o2hb_region_item_ops = {
        .release = o2hb_region_release,
        .show_attribute = o2hb_region_show,
        .store_attribute = o2hb_region_store,
};

static struct config_item_type o2hb_region_type = {
        .ct_item_ops = &o2hb_region_item_ops,
        .ct_attrs = o2hb_region_attrs,
        .ct_owner = THIS_MODULE,
};

1575 | /* heartbeat set */ | |
1576 | ||
1577 | struct o2hb_heartbeat_group { | |
1578 | struct config_group hs_group; | |
1579 | /* some stuff? */ | |
};

static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2hb_heartbeat_group, hs_group)
		: NULL;
}

static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
							  const char *name)
{
	struct o2hb_region *reg = NULL;

	reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
	if (reg == NULL)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);

	spin_lock(&o2hb_live_lock);
	list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
	spin_unlock(&o2hb_live_lock);

	return &reg->hr_item;
}

static void o2hb_heartbeat_group_drop_item(struct config_group *group,
					   struct config_item *item)
{
	struct task_struct *hb_task;
	struct o2hb_region *reg = to_o2hb_region(item);

	/* stop the thread when the user removes the region dir */
	spin_lock(&o2hb_live_lock);
	hb_task = reg->hr_task;
	reg->hr_task = NULL;
	spin_unlock(&o2hb_live_lock);

	if (hb_task)
		kthread_stop(hb_task);

	/*
	 * If we're racing a dev_write(), wake it up: the writer is
	 * waiting on o2hb_steady_queue and will re-check reg->hr_task.
	 */
	if (atomic_read(&reg->hr_steady_iterations) != 0) {
		atomic_set(&reg->hr_steady_iterations, 0);
		wake_up(&o2hb_steady_queue);
	}

	config_item_put(item);
}

struct o2hb_heartbeat_group_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2hb_heartbeat_group *, char *);
	ssize_t (*store)(struct o2hb_heartbeat_group *, const char *, size_t);
};

static ssize_t o2hb_heartbeat_group_show(struct config_item *item,
					 struct configfs_attribute *attr,
					 char *page)
{
	struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
	struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
		container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
	ssize_t ret = 0;

	if (o2hb_heartbeat_group_attr->show)
		ret = o2hb_heartbeat_group_attr->show(reg, page);
	return ret;
}

static ssize_t o2hb_heartbeat_group_store(struct config_item *item,
					  struct configfs_attribute *attr,
					  const char *page, size_t count)
{
	struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
	struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
		container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
	ssize_t ret = -EINVAL;

	if (o2hb_heartbeat_group_attr->store)
		ret = o2hb_heartbeat_group_attr->store(reg, page, count);
	return ret;
}

static ssize_t o2hb_heartbeat_group_threshold_show(struct o2hb_heartbeat_group *group,
						   char *page)
{
	return sprintf(page, "%u\n", o2hb_dead_threshold);
}

static ssize_t o2hb_heartbeat_group_threshold_store(struct o2hb_heartbeat_group *group,
						    const char *page,
						    size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 10);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	/* this will validate ranges for us. */
	o2hb_dead_threshold_set((unsigned int) tmp);

	return count;
}

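/*
 * A quick sketch of tuning the dead threshold from userspace, assuming
 * the conventional /sys/kernel/config mount point; the cluster name is
 * an illustrative placeholder.  o2hb_dead_threshold_set() range-checks
 * the value, per the comment above:
 *
 *	cat /sys/kernel/config/cluster/mycluster/heartbeat/dead_threshold
 *	echo 61 > /sys/kernel/config/cluster/mycluster/heartbeat/dead_threshold
 */
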
static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_threshold = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "dead_threshold",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_heartbeat_group_threshold_show,
	.store	= o2hb_heartbeat_group_threshold_store,
};

static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
	&o2hb_heartbeat_group_attr_threshold.attr,
	NULL,
};

static struct configfs_item_operations o2hb_hearbeat_group_item_ops = {
	.show_attribute		= o2hb_heartbeat_group_show,
	.store_attribute	= o2hb_heartbeat_group_store,
};

static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
	.make_item	= o2hb_heartbeat_group_make_item,
	.drop_item	= o2hb_heartbeat_group_drop_item,
};

static struct config_item_type o2hb_heartbeat_group_type = {
	.ct_group_ops	= &o2hb_heartbeat_group_group_ops,
	.ct_item_ops	= &o2hb_hearbeat_group_item_ops,
	.ct_attrs	= o2hb_heartbeat_group_attrs,
	.ct_owner	= THIS_MODULE,
};
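
/*
 * Region lifecycle, for reference: configfs maps a mkdir under the
 * heartbeat group to o2hb_heartbeat_group_make_item() and a rmdir to
 * o2hb_heartbeat_group_drop_item(), which stops the region's hb thread
 * via kthread_stop().  A minimal sketch with illustrative names:
 *
 *	mkdir /sys/kernel/config/cluster/mycluster/heartbeat/<region-uuid>
 *	(configure block_bytes/start_block/blocks/dev as sketched above)
 *	rmdir /sys/kernel/config/cluster/mycluster/heartbeat/<region-uuid>
 */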

/* this is just here to avoid touching group in heartbeat.h which the
 * entire damn world #includes */
struct config_group *o2hb_alloc_hb_set(void)
{
	struct o2hb_heartbeat_group *hs = NULL;
	struct config_group *ret = NULL;

	hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
	if (hs == NULL)
		goto out;

	config_group_init_type_name(&hs->hs_group, "heartbeat",
				    &o2hb_heartbeat_group_type);

	ret = &hs->hs_group;
out:
	if (ret == NULL)
		kfree(hs);
	return ret;
}

void o2hb_free_hb_set(struct config_group *group)
{
	struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
	kfree(hs);
}

/* hb callback registration and issuing */

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
	if (type == O2HB_NUM_CB)
		return ERR_PTR(-EINVAL);

	return &o2hb_callbacks[type];
}

void o2hb_setup_callback(struct o2hb_callback_func *hc,
			 enum o2hb_callback_type type,
			 o2hb_cb_func *func,
			 void *data,
			 int priority)
{
	INIT_LIST_HEAD(&hc->hc_item);
	hc->hc_func = func;
	hc->hc_data = data;
	hc->hc_priority = priority;
	hc->hc_type = type;
	hc->hc_magic = O2HB_CB_MAGIC;
}
EXPORT_SYMBOL_GPL(o2hb_setup_callback);

static struct o2hb_region *o2hb_find_region(const char *region_uuid)
{
	struct o2hb_region *p, *reg = NULL;

	assert_spin_locked(&o2hb_live_lock);

	list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
		if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
			reg = p;
			break;
		}
	}

	return reg;
}

static int o2hb_region_get(const char *region_uuid)
{
	int ret = 0;
	struct o2hb_region *reg;

	spin_lock(&o2hb_live_lock);

	reg = o2hb_find_region(region_uuid);
	if (!reg)
		ret = -ENOENT;
	spin_unlock(&o2hb_live_lock);

	if (ret)
		goto out;

	ret = o2nm_depend_this_node();
	if (ret)
		goto out;

	ret = o2nm_depend_item(&reg->hr_item);
	if (ret)
		o2nm_undepend_this_node();

out:
	return ret;
}

static void o2hb_region_put(const char *region_uuid)
{
	struct o2hb_region *reg;

	spin_lock(&o2hb_live_lock);

	reg = o2hb_find_region(region_uuid);

	spin_unlock(&o2hb_live_lock);

	if (reg) {
		o2nm_undepend_item(&reg->hr_item);
		o2nm_undepend_this_node();
	}
}

int o2hb_register_callback(const char *region_uuid,
			   struct o2hb_callback_func *hc)
{
	struct o2hb_callback_func *tmp;
	struct list_head *iter;
	struct o2hb_callback *hbcall;
	int ret;

	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
	BUG_ON(!list_empty(&hc->hc_item));

	hbcall = hbcall_from_type(hc->hc_type);
	if (IS_ERR(hbcall)) {
		ret = PTR_ERR(hbcall);
		goto out;
	}

	if (region_uuid) {
		ret = o2hb_region_get(region_uuid);
		if (ret)
			goto out;
	}

	down_write(&o2hb_callback_sem);

	list_for_each(iter, &hbcall->list) {
		tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
		if (hc->hc_priority < tmp->hc_priority) {
			list_add_tail(&hc->hc_item, iter);
			break;
		}
	}
	if (list_empty(&hc->hc_item))
		list_add_tail(&hc->hc_item, &hbcall->list);

	up_write(&o2hb_callback_sem);
	ret = 0;
out:
	mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
	     ret, __builtin_return_address(0), hc);
	return ret;
}
EXPORT_SYMBOL_GPL(o2hb_register_callback);
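
/*
 * A minimal usage sketch for the callback API above.  "my_down_cb",
 * "my_node_down" and "my_attach" are hypothetical names; the sketch
 * assumes the o2hb_cb_func signature and the O2HB_NODE_DOWN_CB type
 * from heartbeat.h:
 *
 *	static struct o2hb_callback_func my_down_cb;
 *
 *	static void my_node_down(struct o2nm_node *node, int node_num,
 *				 void *data)
 *	{
 *		(react to the departed node here)
 *	}
 *
 *	static int my_attach(const char *region_uuid)
 *	{
 *		o2hb_setup_callback(&my_down_cb, O2HB_NODE_DOWN_CB,
 *				    my_node_down, NULL, 0);
 *		return o2hb_register_callback(region_uuid, &my_down_cb);
 *	}
 *
 * Teardown pairs this with o2hb_unregister_callback(region_uuid,
 * &my_down_cb).
 */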

void o2hb_unregister_callback(const char *region_uuid,
			      struct o2hb_callback_func *hc)
{
	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);

	mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
	     __builtin_return_address(0), hc);

	/* XXX Can this happen _with_ a region reference? */
	if (list_empty(&hc->hc_item))
		return;

	if (region_uuid)
		o2hb_region_put(region_uuid);

	down_write(&o2hb_callback_sem);

	list_del_init(&hc->hc_item);

	up_write(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_unregister_callback);

int o2hb_check_node_heartbeating(u8 node_num)
{
	unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];

	o2hb_fill_node_map(testing_map, sizeof(testing_map));
	if (!test_bit(node_num, testing_map)) {
		mlog(ML_HEARTBEAT,
		     "node (%u) does not have heartbeating enabled.\n",
		     node_num);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating);

int o2hb_check_node_heartbeating_from_callback(u8 node_num)
{
	unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];

	o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
	if (!test_bit(node_num, testing_map)) {
		mlog(ML_HEARTBEAT,
		     "node (%u) does not have heartbeating enabled.\n",
		     node_num);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback);
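
/*
 * A short sketch of the liveness checks above; "target_node" is a
 * hypothetical variable and -ENOTCONN an illustrative error choice.
 * The _from_callback variant is intended for use from within an o2hb
 * callback:
 *
 *	if (!o2hb_check_node_heartbeating(target_node))
 *		return -ENOTCONN;	(peer is not heartbeating)
 */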

/* Makes sure our local node is configured with a node number, and is
 * heartbeating. */
int o2hb_check_local_node_heartbeating(void)
{
	u8 node_num;

	/* if this node's number was set then networking is configured */
	node_num = o2nm_this_node();
	if (node_num == O2NM_MAX_NODES) {
		mlog(ML_HEARTBEAT, "this node has not been configured.\n");
		return 0;
	}

	return o2hb_check_node_heartbeating(node_num);
}
EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating);

/*
 * this is just a hack until we get the plumbing which flips file systems
 * read only and drops the hb ref instead of killing the node dead.
 */
void o2hb_stop_all_regions(void)
{
	struct o2hb_region *reg;

	mlog(ML_ERROR, "stopping heartbeat on all active regions.\n");

	spin_lock(&o2hb_live_lock);

	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item)
		reg->hr_unclean_stop = 1;

	spin_unlock(&o2hb_live_lock);
}
EXPORT_SYMBOL_GPL(o2hb_stop_all_regions);