/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/configfs.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"

#include "masklog.h"


/*
 * The first heartbeat pass had one global thread that would serialize all hb
 * callback calls. This global serializing sem should only be removed once
 * we've made sure that all callees can deal with being called concurrently
 * from multiple hb region threads.
 */
static DECLARE_RWSEM(o2hb_callback_sem);

/*
 * multiple hb threads are watching multiple regions. A node is live
 * whenever any of the threads sees activity from the node in its region.
 */
static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);

/*
 * In global heartbeat, we maintain a series of region bitmaps.
 *  - o2hb_region_bitmap allows us to limit the region number to max region.
 *  - o2hb_live_region_bitmap tracks live regions (seen steady iterations).
 *  - o2hb_quorum_region_bitmap tracks live regions that have seen all nodes
 *    heartbeat on it.
 *  - o2hb_failed_region_bitmap tracks the regions that have seen io timeouts.
 */
static unsigned long o2hb_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_live_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_quorum_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];

#define O2HB_DB_TYPE_LIVENODES		0
#define O2HB_DB_TYPE_LIVEREGIONS	1
#define O2HB_DB_TYPE_QUORUMREGIONS	2
#define O2HB_DB_TYPE_FAILEDREGIONS	3
#define O2HB_DB_TYPE_REGION_LIVENODES	4
#define O2HB_DB_TYPE_REGION_NUMBER	5
#define O2HB_DB_TYPE_REGION_ELAPSED_TIME	6
#define O2HB_DB_TYPE_REGION_PINNED	7
struct o2hb_debug_buf {
        int db_type;
        int db_size;
        int db_len;
        void *db_data;
};

static struct o2hb_debug_buf *o2hb_db_livenodes;
static struct o2hb_debug_buf *o2hb_db_liveregions;
static struct o2hb_debug_buf *o2hb_db_quorumregions;
static struct o2hb_debug_buf *o2hb_db_failedregions;

#define O2HB_DEBUG_DIR			"o2hb"
#define O2HB_DEBUG_LIVENODES		"livenodes"
#define O2HB_DEBUG_LIVEREGIONS		"live_regions"
#define O2HB_DEBUG_QUORUMREGIONS	"quorum_regions"
#define O2HB_DEBUG_FAILEDREGIONS	"failed_regions"
#define O2HB_DEBUG_REGION_NUMBER	"num"
#define O2HB_DEBUG_REGION_ELAPSED_TIME	"elapsed_time_in_ms"
#define O2HB_DEBUG_REGION_PINNED	"pinned"

static struct dentry *o2hb_debug_dir;
static struct dentry *o2hb_debug_livenodes;
static struct dentry *o2hb_debug_liveregions;
static struct dentry *o2hb_debug_quorumregions;
static struct dentry *o2hb_debug_failedregions;

static LIST_HEAD(o2hb_all_regions);

static struct o2hb_callback {
        struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);

#define O2HB_DEFAULT_BLOCK_BITS	9

enum o2hb_heartbeat_modes {
        O2HB_HEARTBEAT_LOCAL		= 0,
        O2HB_HEARTBEAT_GLOBAL,
        O2HB_HEARTBEAT_NUM_MODES,
};

char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
        "local",	/* O2HB_HEARTBEAT_LOCAL */
        "global",	/* O2HB_HEARTBEAT_GLOBAL */
};

unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;

/*
 * o2hb_dependent_users tracks the number of registered callbacks that depend
 * on heartbeat. o2net and o2dlm are two entities that register this callback.
 * However only o2dlm depends on the heartbeat. It does not want the heartbeat
 * to stop while a dlm domain is still active.
 */
unsigned int o2hb_dependent_users;

/*
 * In global heartbeat mode, all regions are pinned if there are one or more
 * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
 * regions are unpinned if the region count exceeds the cut off or the number
 * of dependent users falls to zero.
 */
#define O2HB_PIN_CUT_OFF		3
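
/*
 * Example of the rule above: with the default cut off of 3, regions stay
 * pinned while there is at least one dependent user and no more than three
 * quorum regions; adding a fourth quorum region (or the last dependent user
 * going away) unpins all regions.
 */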

/*
 * In local heartbeat mode, we assume the dlm domain name to be the same as
 * region uuid. This is true for domains created for the file system but not
 * necessarily true for userdlm domains. This is a known limitation.
 *
 * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
 * works for both file system and userdlm domains.
 */
static int o2hb_region_pin(const char *region_uuid);
static void o2hb_region_unpin(const char *region_uuid);

/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and
 * it's not interesting to anyone until then anyway. */
static void o2hb_dead_threshold_set(unsigned int threshold)
{
        if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
                spin_lock(&o2hb_live_lock);
                if (list_empty(&o2hb_all_regions))
                        o2hb_dead_threshold = threshold;
                spin_unlock(&o2hb_live_lock);
        }
}

static int o2hb_global_hearbeat_mode_set(unsigned int hb_mode)
{
        int ret = -1;

        if (hb_mode < O2HB_HEARTBEAT_NUM_MODES) {
                spin_lock(&o2hb_live_lock);
                if (list_empty(&o2hb_all_regions)) {
                        o2hb_heartbeat_mode = hb_mode;
                        ret = 0;
                }
                spin_unlock(&o2hb_live_lock);
        }

        return ret;
}

struct o2hb_node_event {
        struct list_head        hn_item;
        enum o2hb_callback_type hn_event_type;
        struct o2nm_node        *hn_node;
        int                     hn_node_num;
};

struct o2hb_disk_slot {
        struct o2hb_disk_heartbeat_block *ds_raw_block;
        u8                      ds_node_num;
        u64                     ds_last_time;
        u64                     ds_last_generation;
        u16                     ds_equal_samples;
        u16                     ds_changed_samples;
        struct list_head        ds_live_item;
};

/* each thread owns a region.. when we're asked to tear down the region
 * we ask the thread to stop, which then cleans up the region */
struct o2hb_region {
        struct config_item      hr_item;

        struct list_head        hr_all_item;
        unsigned                hr_unclean_stop:1,
                                hr_aborted_start:1,
                                hr_item_pinned:1,
                                hr_item_dropped:1;

        /* protected by the hr_callback_sem */
        struct task_struct      *hr_task;

        unsigned int            hr_blocks;
        unsigned long long      hr_start_block;

        unsigned int            hr_block_bits;
        unsigned int            hr_block_bytes;

        unsigned int            hr_slots_per_page;
        unsigned int            hr_num_pages;

        struct page             **hr_slot_data;
        struct block_device     *hr_bdev;
        struct o2hb_disk_slot   *hr_slots;

        /* live node map of this region */
        unsigned long           hr_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned int            hr_region_num;

        struct dentry           *hr_debug_dir;
        struct dentry           *hr_debug_livenodes;
        struct dentry           *hr_debug_regnum;
        struct dentry           *hr_debug_elapsed_time;
        struct dentry           *hr_debug_pinned;
        struct o2hb_debug_buf   *hr_db_livenodes;
        struct o2hb_debug_buf   *hr_db_regnum;
        struct o2hb_debug_buf   *hr_db_elapsed_time;
        struct o2hb_debug_buf   *hr_db_pinned;

        /* let the person setting up hb wait until it has reached a
         * 'steady' state before we return. This will be fixed when we
         * have a more complete api that doesn't lead to this sort of
         * fragility. */
        atomic_t                hr_steady_iterations;

        /* terminate o2hb thread if it does not reach steady state
         * (hr_steady_iterations == 0) within hr_unsteady_iterations */
        atomic_t                hr_unsteady_iterations;

        char                    hr_dev_name[BDEVNAME_SIZE];

        unsigned int            hr_timeout_ms;

        /* randomized as the region goes up and down so that a node
         * recognizes a node going up and down in one iteration */
        u64                     hr_generation;

        struct delayed_work     hr_write_timeout_work;
        unsigned long           hr_last_timeout_start;

        /* Used during o2hb_check_slot to hold a copy of the block
         * being checked because we temporarily have to zero out the
         * crc field. */
        struct o2hb_disk_heartbeat_block *hr_tmp_block;
};

struct o2hb_bio_wait_ctxt {
        atomic_t          wc_num_reqs;
        struct completion wc_io_complete;
        int               wc_error;
};

static int o2hb_pop_count(void *map, int count)
{
        int i = -1, pop = 0;

        while ((i = find_next_bit(map, count, i + 1)) < count)
                pop++;
        return pop;
}
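
/*
 * Note: o2hb_pop_count() simply counts the bits set in the first 'count'
 * bits of 'map' -- equivalent in effect to a bitmap population count such
 * as bitmap_weight() -- and is used below to count failed and quorum
 * regions.
 */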

static void o2hb_write_timeout(struct work_struct *work)
{
        int failed, quorum;
        unsigned long flags;
        struct o2hb_region *reg =
                container_of(work, struct o2hb_region,
                             hr_write_timeout_work.work);

        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
             jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));

        if (o2hb_global_heartbeat_active()) {
                spin_lock_irqsave(&o2hb_live_lock, flags);
                if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
                        set_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
                failed = o2hb_pop_count(&o2hb_failed_region_bitmap,
                                        O2NM_MAX_REGIONS);
                quorum = o2hb_pop_count(&o2hb_quorum_region_bitmap,
                                        O2NM_MAX_REGIONS);
                spin_unlock_irqrestore(&o2hb_live_lock, flags);

                mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n",
                     quorum, failed);

                /*
                 * Fence if the number of failed regions >= half the number
                 * of quorum regions
                 */
                if ((failed << 1) < quorum)
                        return;
        }
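
        /*
         * Worked example of the check above: with three quorum regions,
         * (failed << 1) < quorum holds while only one region has failed,
         * so the timeout is ignored; once two or more have failed we fall
         * through and let the quorum code fence this node.
         */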

        o2quo_disk_timeout();
}

static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
        /* Arm writeout only after thread reaches steady state */
        if (atomic_read(&reg->hr_steady_iterations) != 0)
                return;

        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
             O2HB_MAX_WRITE_TIMEOUT_MS);

        if (o2hb_global_heartbeat_active()) {
                spin_lock(&o2hb_live_lock);
                clear_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
                spin_unlock(&o2hb_live_lock);
        }
        cancel_delayed_work(&reg->hr_write_timeout_work);
        reg->hr_last_timeout_start = jiffies;
        schedule_delayed_work(&reg->hr_write_timeout_work,
                              msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
}

static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
{
        cancel_delayed_work_sync(&reg->hr_write_timeout_work);
}

static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
{
        atomic_set(&wc->wc_num_reqs, 1);
        init_completion(&wc->wc_io_complete);
        wc->wc_error = 0;
}

/* Used in error paths too */
static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
                                     unsigned int num)
{
        /* sadly atomic_sub_and_test() isn't available on all platforms. The
         * good news is that the fast path only completes one at a time */
        while(num--) {
                if (atomic_dec_and_test(&wc->wc_num_reqs)) {
                        BUG_ON(num > 0);
                        complete(&wc->wc_io_complete);
                }
        }
}

static void o2hb_wait_on_io(struct o2hb_region *reg,
                            struct o2hb_bio_wait_ctxt *wc)
{
        o2hb_bio_wait_dec(wc, 1);
        wait_for_completion(&wc->wc_io_complete);
}
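
/*
 * Note on the wait context: o2hb_bio_wait_init() biases wc_num_reqs to 1 so
 * that in-flight bios completing early cannot fire wc_io_complete before all
 * bios have been submitted; o2hb_wait_on_io() drops that extra reference and
 * then sleeps until the last completion arrives.
 */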

static void o2hb_bio_end_io(struct bio *bio,
                            int error)
{
        struct o2hb_bio_wait_ctxt *wc = bio->bi_private;

        if (error) {
                mlog(ML_ERROR, "IO Error %d\n", error);
                wc->wc_error = error;
        }

        o2hb_bio_wait_dec(wc, 1);
        bio_put(bio);
}

/* Setup a Bio to cover I/O against the slots from *current_slot up to
 * max_slots, advancing *current_slot past the slots actually covered. */
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
                                      struct o2hb_bio_wait_ctxt *wc,
                                      unsigned int *current_slot,
                                      unsigned int max_slots)
{
        int len, current_page;
        unsigned int vec_len, vec_start;
        unsigned int bits = reg->hr_block_bits;
        unsigned int spp = reg->hr_slots_per_page;
        unsigned int cs = *current_slot;
        struct bio *bio;
        struct page *page;

        /* Testing has shown this allocation to take long enough under
         * GFP_KERNEL that the local node can get fenced. It would be
         * nicest if we could pre-allocate these bios and avoid this
         * all together. */
        bio = bio_alloc(GFP_ATOMIC, 16);
        if (!bio) {
                mlog(ML_ERROR, "Could not alloc slots BIO!\n");
                bio = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /* Must put everything in 512 byte sectors for the bio... */
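        /* e.g. with 4096 byte heartbeat blocks, hr_block_bits is 12 and the
         * shift below by (bits - 9) == 3 converts a block number into its
         * starting 512 byte sector number. */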
        bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
        bio->bi_bdev = reg->hr_bdev;
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;

        vec_start = (cs << bits) % PAGE_CACHE_SIZE;
        while(cs < max_slots) {
                current_page = cs / spp;
                page = reg->hr_slot_data[current_page];

                vec_len = min(PAGE_CACHE_SIZE - vec_start,
                              (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );

                mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
                     current_page, vec_len, vec_start);

                len = bio_add_page(bio, page, vec_len, vec_start);
                if (len != vec_len) break;

                cs += vec_len / (PAGE_CACHE_SIZE/spp);
                vec_start = 0;
        }

bail:
        *current_slot = cs;
        return bio;
}

static int o2hb_read_slots(struct o2hb_region *reg,
                           unsigned int max_slots)
{
        unsigned int current_slot=0;
        int status;
        struct o2hb_bio_wait_ctxt wc;
        struct bio *bio;

        o2hb_bio_wait_init(&wc);

        while(current_slot < max_slots) {
                bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
                if (IS_ERR(bio)) {
                        status = PTR_ERR(bio);
                        mlog_errno(status);
                        goto bail_and_wait;
                }

                atomic_inc(&wc.wc_num_reqs);
                submit_bio(READ, bio);
        }

        status = 0;

bail_and_wait:
        o2hb_wait_on_io(reg, &wc);
        if (wc.wc_error && !status)
                status = wc.wc_error;

        return status;
}

static int o2hb_issue_node_write(struct o2hb_region *reg,
                                 struct o2hb_bio_wait_ctxt *write_wc)
{
        int status;
        unsigned int slot;
        struct bio *bio;

        o2hb_bio_wait_init(write_wc);

        slot = o2nm_this_node();

        bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
        if (IS_ERR(bio)) {
                status = PTR_ERR(bio);
                mlog_errno(status);
                goto bail;
        }

        atomic_inc(&write_wc->wc_num_reqs);
        submit_bio(WRITE, bio);

        status = 0;
bail:
        return status;
}

static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
                                     struct o2hb_disk_heartbeat_block *hb_block)
{
        __le32 old_cksum;
        u32 ret;

        /* We want to compute the block crc with a 0 value in the
         * hb_cksum field. Save it off here and replace after the
         * crc. */
        old_cksum = hb_block->hb_cksum;
        hb_block->hb_cksum = 0;

        ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);

        hb_block->hb_cksum = old_cksum;

        return ret;
}

static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
        mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
             "cksum = 0x%x, generation 0x%llx\n",
             (long long)le64_to_cpu(hb_block->hb_seq),
             hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
             (long long)le64_to_cpu(hb_block->hb_generation));
}

static int o2hb_verify_crc(struct o2hb_region *reg,
                           struct o2hb_disk_heartbeat_block *hb_block)
{
        u32 read, computed;

        read = le32_to_cpu(hb_block->hb_cksum);
        computed = o2hb_compute_block_crc_le(reg, hb_block);

        return read == computed;
}

/*
 * Compare the slot data with what we wrote in the last iteration.
 * If the match fails, print an appropriate error message. This is to
 * detect errors like... another node heartbeating on the same slot,
 * a flaky device that is losing writes, etc.
 * Returns 1 if check succeeds, 0 otherwise.
 */
static int o2hb_check_own_slot(struct o2hb_region *reg)
{
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;
        char *errstr;

        slot = &reg->hr_slots[o2nm_this_node()];
        /* Don't check on our 1st timestamp */
        if (!slot->ds_last_time)
                return 0;

        hb_block = slot->ds_raw_block;
        if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
            le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
            hb_block->hb_node == slot->ds_node_num)
                return 1;

#define ERRSTR1		"Another node is heartbeating on device"
#define ERRSTR2		"Heartbeat generation mismatch on device"
#define ERRSTR3		"Heartbeat sequence mismatch on device"

        if (hb_block->hb_node != slot->ds_node_num)
                errstr = ERRSTR1;
        else if (le64_to_cpu(hb_block->hb_generation) !=
                 slot->ds_last_generation)
                errstr = ERRSTR2;
        else
                errstr = ERRSTR3;

        mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), "
             "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name,
             slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
             (unsigned long long)slot->ds_last_time, hb_block->hb_node,
             (unsigned long long)le64_to_cpu(hb_block->hb_generation),
             (unsigned long long)le64_to_cpu(hb_block->hb_seq));

        return 0;
}

static inline void o2hb_prepare_block(struct o2hb_region *reg,
                                      u64 generation)
{
        int node_num;
        u64 cputime;
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;

        node_num = o2nm_this_node();
        slot = &reg->hr_slots[node_num];

        hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
        memset(hb_block, 0, reg->hr_block_bytes);
        /* TODO: time stuff */
        cputime = CURRENT_TIME.tv_sec;
        if (!cputime)
                cputime = 1;

        hb_block->hb_seq = cpu_to_le64(cputime);
        hb_block->hb_node = node_num;
        hb_block->hb_generation = cpu_to_le64(generation);
        hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);

        /* This step must always happen last! */
        hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
                                                                   hb_block));

        mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
             (long long)generation,
             le32_to_cpu(hb_block->hb_cksum));
}

static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
                                struct o2nm_node *node,
                                int idx)
{
        struct list_head *iter;
        struct o2hb_callback_func *f;

        list_for_each(iter, &hbcall->list) {
                f = list_entry(iter, struct o2hb_callback_func, hc_item);
                mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
                (f->hc_func)(node, idx, f->hc_data);
        }
}

/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
        int empty;
        struct o2hb_callback *hbcall;
        struct o2hb_node_event *event;

        spin_lock(&o2hb_live_lock);
        empty = list_empty(&queued_event->hn_item);
        spin_unlock(&o2hb_live_lock);
        if (empty)
                return;

        /* Holding callback sem assures we don't alter the callback
         * lists when doing this, and serializes ourselves with other
         * processes wanting callbacks. */
        down_write(&o2hb_callback_sem);

        spin_lock(&o2hb_live_lock);
        while (!list_empty(&o2hb_node_events)
               && !list_empty(&queued_event->hn_item)) {
                event = list_entry(o2hb_node_events.next,
                                   struct o2hb_node_event,
                                   hn_item);
                list_del_init(&event->hn_item);
                spin_unlock(&o2hb_live_lock);

                mlog(ML_HEARTBEAT, "Node %s event for %d\n",
                     event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
                     event->hn_node_num);

                hbcall = hbcall_from_type(event->hn_event_type);

                /* We should *never* have gotten on to the list with a
                 * bad type... This isn't something that we should try
                 * to recover from. */
                BUG_ON(IS_ERR(hbcall));

                o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);

                spin_lock(&o2hb_live_lock);
        }
        spin_unlock(&o2hb_live_lock);

        up_write(&o2hb_callback_sem);
}

static void o2hb_queue_node_event(struct o2hb_node_event *event,
                                  enum o2hb_callback_type type,
                                  struct o2nm_node *node,
                                  int node_num)
{
        assert_spin_locked(&o2hb_live_lock);

        BUG_ON((!node) && (type != O2HB_NODE_DOWN_CB));

        event->hn_event_type = type;
        event->hn_node = node;
        event->hn_node_num = node_num;

        mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
             type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);

        list_add_tail(&event->hn_item, &o2hb_node_events);
}

static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
        struct o2hb_node_event event =
                { .hn_item = LIST_HEAD_INIT(event.hn_item), };
        struct o2nm_node *node;

        node = o2nm_get_node_by_num(slot->ds_node_num);
        if (!node)
                return;

        spin_lock(&o2hb_live_lock);
        if (!list_empty(&slot->ds_live_item)) {
                mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
                     slot->ds_node_num);

                list_del_init(&slot->ds_live_item);

                if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
                        clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

                        o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
                                              slot->ds_node_num);
                }
        }
        spin_unlock(&o2hb_live_lock);

        o2hb_run_event_list(&event);

        o2nm_node_put(node);
}

static void o2hb_set_quorum_device(struct o2hb_region *reg)
{
        if (!o2hb_global_heartbeat_active())
                return;

        /* Prevent race with o2hb_heartbeat_group_drop_item() */
        if (kthread_should_stop())
                return;

        /* Tag region as quorum only after thread reaches steady state */
        if (atomic_read(&reg->hr_steady_iterations) != 0)
                return;

        spin_lock(&o2hb_live_lock);

        if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
                goto unlock;

        /*
         * A region can be added to the quorum only when it sees all
         * live nodes heartbeat on it. In other words, the region has been
         * added to all nodes.
         */
        if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
                   sizeof(o2hb_live_node_bitmap)))
                goto unlock;

        printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
               config_item_name(&reg->hr_item), reg->hr_dev_name);

        set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);

        /*
         * If global heartbeat active, unpin all regions if the
         * region count > CUT_OFF
         */
        if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
                           O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
                o2hb_region_unpin(NULL);
unlock:
        spin_unlock(&o2hb_live_lock);
}

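/*
 * Per-slot state machine, roughly: a slot joins the live list once it has
 * shown O2HB_LIVE_THRESHOLD changed sequence numbers (ds_changed_samples),
 * and is considered dead again after o2hb_dead_threshold identical reads
 * (ds_equal_samples) or a heartbeat generation change.
 */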
static int o2hb_check_slot(struct o2hb_region *reg,
                           struct o2hb_disk_slot *slot)
{
        int changed = 0, gen_changed = 0;
        struct o2hb_node_event event =
                { .hn_item = LIST_HEAD_INIT(event.hn_item), };
        struct o2nm_node *node;
        struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
        u64 cputime;
        unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
        unsigned int slot_dead_ms;
        int tmp;

        memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);

        /*
         * If a node is no longer configured but is still in the livemap, we
         * may need to clear that bit from the livemap.
         */
        node = o2nm_get_node_by_num(slot->ds_node_num);
        if (!node) {
                spin_lock(&o2hb_live_lock);
                tmp = test_bit(slot->ds_node_num, o2hb_live_node_bitmap);
                spin_unlock(&o2hb_live_lock);
                if (!tmp)
                        return 0;
        }

        if (!o2hb_verify_crc(reg, hb_block)) {
                /* all paths from here will drop o2hb_live_lock for
                 * us. */
                spin_lock(&o2hb_live_lock);

                /* Don't print an error on the console in this case -
                 * a freshly formatted heartbeat area will not have a
                 * crc set on it. */
                if (list_empty(&slot->ds_live_item))
                        goto out;

                /* The node is live but pushed out a bad crc. We
                 * consider it a transient miss but don't populate any
                 * other values as they may be junk. */
                mlog(ML_ERROR, "Node %d has written a bad crc to %s\n",
                     slot->ds_node_num, reg->hr_dev_name);
                o2hb_dump_slot(hb_block);

                slot->ds_equal_samples++;
                goto fire_callbacks;
        }

        /* we don't care if these wrap.. the state transitions below
         * clear at the right places */
        cputime = le64_to_cpu(hb_block->hb_seq);
        if (slot->ds_last_time != cputime)
                slot->ds_changed_samples++;
        else
                slot->ds_equal_samples++;
        slot->ds_last_time = cputime;

        /* The node changed heartbeat generations. We assume this to
         * mean it dropped off but came back before we timed out. We
         * want to consider it down for the time being but don't want
         * to lose any changed_samples state we might build up to
         * considering it live again. */
        if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
                gen_changed = 1;
                slot->ds_equal_samples = 0;
                mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
                     "to 0x%llx)\n", slot->ds_node_num,
                     (long long)slot->ds_last_generation,
                     (long long)le64_to_cpu(hb_block->hb_generation));
        }

        slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);

        mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
             "seq %llu last %llu changed %u equal %u\n",
             slot->ds_node_num, (long long)slot->ds_last_generation,
             le32_to_cpu(hb_block->hb_cksum),
             (unsigned long long)le64_to_cpu(hb_block->hb_seq),
             (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
             slot->ds_equal_samples);

        spin_lock(&o2hb_live_lock);

fire_callbacks:
        /* dead nodes only come to life after some number of
         * changes at any time during their dead time */
        if (list_empty(&slot->ds_live_item) &&
            slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
                mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
                     slot->ds_node_num, (long long)slot->ds_last_generation);

                set_bit(slot->ds_node_num, reg->hr_live_node_bitmap);

                /* first on the list generates a callback */
                if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
                        mlog(ML_HEARTBEAT, "o2hb: Add node %d to live nodes "
                             "bitmap\n", slot->ds_node_num);
                        set_bit(slot->ds_node_num, o2hb_live_node_bitmap);

                        o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
                                              slot->ds_node_num);

                        changed = 1;
                }

                list_add_tail(&slot->ds_live_item,
                              &o2hb_live_slots[slot->ds_node_num]);

                slot->ds_equal_samples = 0;

                /* We want to be sure that all nodes agree on the
                 * number of milliseconds before a node will be
                 * considered dead. The self-fencing timeout is
                 * computed from this value, and a discrepancy might
                 * result in heartbeat calling a node dead when it
                 * hasn't self-fenced yet. */
                slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
                if (slot_dead_ms && slot_dead_ms != dead_ms) {
                        /* TODO: Perhaps we can fail the region here. */
                        mlog(ML_ERROR, "Node %d on device %s has a dead count "
                             "of %u ms, but our count is %u ms.\n"
                             "Please double check your configuration values "
                             "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
                             slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
                             dead_ms);
                }
                goto out;
        }

        /* if the list is dead, we're done.. */
        if (list_empty(&slot->ds_live_item))
                goto out;

        /* live nodes only go dead after enough consecutive missed
         * samples.. reset the missed counter whenever we see
         * activity */
        if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
                mlog(ML_HEARTBEAT, "Node %d left my region\n",
                     slot->ds_node_num);

                clear_bit(slot->ds_node_num, reg->hr_live_node_bitmap);

                /* last off the live_slot generates a callback */
                list_del_init(&slot->ds_live_item);
                if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
                        mlog(ML_HEARTBEAT, "o2hb: Remove node %d from live "
                             "nodes bitmap\n", slot->ds_node_num);
                        clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

                        /* node can be null */
                        o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB,
                                              node, slot->ds_node_num);

                        changed = 1;
                }

                /* We don't clear this because the node is still
                 * actually writing new blocks. */
                if (!gen_changed)
                        slot->ds_changed_samples = 0;
                goto out;
        }
        if (slot->ds_changed_samples) {
                slot->ds_changed_samples = 0;
                slot->ds_equal_samples = 0;
        }
out:
        spin_unlock(&o2hb_live_lock);

        o2hb_run_event_list(&event);

        if (node)
                o2nm_node_put(node);
        return changed;
}

/* This could be faster if we just implemented a find_last_bit, but I
 * don't think the circumstances warrant it. */
static int o2hb_highest_node(unsigned long *nodes,
                             int numbits)
{
        int highest, node;

        highest = numbits;
        node = -1;
        while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
                if (node >= numbits)
                        break;

                highest = node;
        }

        return highest;
}

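/*
 * One heartbeat iteration, roughly: read every configured (or still live)
 * slot, verify our own slot was not overwritten, write our updated block,
 * run the liveness checks on the other slots, and wait for our write to hit
 * the disk before reporting steady state.
 */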
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
        int i, ret, highest_node;
        int membership_change = 0, own_slot_ok = 0;
        unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct o2hb_bio_wait_ctxt write_wc;

        ret = o2nm_configured_node_map(configured_nodes,
                                       sizeof(configured_nodes));
        if (ret) {
                mlog_errno(ret);
                goto bail;
        }

        /*
         * If a node is not configured but is in the livemap, we still need
         * to read the slot so as to be able to remove it from the livemap.
         */
        o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
        i = -1;
        while ((i = find_next_bit(live_node_bitmap,
                                  O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
                set_bit(i, configured_nodes);
        }

        highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
        if (highest_node >= O2NM_MAX_NODES) {
                mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
                ret = -EINVAL;
                goto bail;
        }

        /* No sense in reading the slots of nodes that don't exist
         * yet. Of course, if the node definitions have holes in them
         * then we're reading an empty slot anyway... Consider this
         * best-effort. */
        ret = o2hb_read_slots(reg, highest_node + 1);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }

        /* With an up to date view of the slots, we can check that no
         * other node has been improperly configured to heartbeat in
         * our slot. */
        own_slot_ok = o2hb_check_own_slot(reg);

        /* fill in the proper info for our next heartbeat */
        o2hb_prepare_block(reg, reg->hr_generation);

        ret = o2hb_issue_node_write(reg, &write_wc);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }

        i = -1;
        while((i = find_next_bit(configured_nodes,
                                 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
                membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
        }

        /*
         * We have to be sure we've advertised ourselves on disk
         * before we can go to steady state. This ensures that
         * people we find in our steady state have seen us.
         */
        o2hb_wait_on_io(reg, &write_wc);
        if (write_wc.wc_error) {
                /* Do not re-arm the write timeout on I/O error - we
                 * can't be sure that the new block ever made it to
                 * disk */
                mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
                     write_wc.wc_error, reg->hr_dev_name);
                ret = write_wc.wc_error;
                goto bail;
        }

        /* Skip disarming the timeout if own slot has stale/bad data */
        if (own_slot_ok) {
                o2hb_set_quorum_device(reg);
                o2hb_arm_write_timeout(reg);
        }

bail:
        /* let the person who launched us know when things are steady */
        if (atomic_read(&reg->hr_steady_iterations) != 0) {
                if (!ret && own_slot_ok && !membership_change) {
                        if (atomic_dec_and_test(&reg->hr_steady_iterations))
                                wake_up(&o2hb_steady_queue);
                }
        }

        if (atomic_read(&reg->hr_steady_iterations) != 0) {
                if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
                        printk(KERN_NOTICE "o2hb: Unable to stabilize "
                               "heartbeat on region %s (%s)\n",
                               config_item_name(&reg->hr_item),
                               reg->hr_dev_name);
                        atomic_set(&reg->hr_steady_iterations, 0);
                        reg->hr_aborted_start = 1;
                        wake_up(&o2hb_steady_queue);
                        ret = -EIO;
                }
        }

        return ret;
}

/* Subtract b from a, storing the result in a. a *must* have a larger
 * value than b. */
static void o2hb_tv_subtract(struct timeval *a,
                             struct timeval *b)
{
        /* just return 0 when a is after b */
        if (a->tv_sec < b->tv_sec ||
            (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
                a->tv_sec = 0;
                a->tv_usec = 0;
                return;
        }

        a->tv_sec -= b->tv_sec;
        a->tv_usec -= b->tv_usec;
        while ( a->tv_usec < 0 ) {
                a->tv_sec--;
                a->tv_usec += 1000000;
        }
}

static unsigned int o2hb_elapsed_msecs(struct timeval *start,
                                       struct timeval *end)
{
        struct timeval res = *end;

        o2hb_tv_subtract(&res, start);

        return res.tv_sec * 1000 + res.tv_usec / 1000;
}

/*
 * we ride the region ref that the region dir holds. before the region
 * dir is removed and drops its ref it will wait to tear down this
 * thread.
 */
static int o2hb_thread(void *data)
{
        int i, ret;
        struct o2hb_region *reg = data;
        struct o2hb_bio_wait_ctxt write_wc;
        struct timeval before_hb, after_hb;
        unsigned int elapsed_msec;

        mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");

        set_user_nice(current, -20);

        /* Pin node */
        o2nm_depend_this_node();

        while (!kthread_should_stop() &&
               !reg->hr_unclean_stop && !reg->hr_aborted_start) {
                /* We track the time spent inside
                 * o2hb_do_disk_heartbeat so that we avoid more than
                 * hr_timeout_ms between disk writes. On busy systems
                 * this should result in a heartbeat which is less
                 * likely to time itself out. */
                do_gettimeofday(&before_hb);

                ret = o2hb_do_disk_heartbeat(reg);

                do_gettimeofday(&after_hb);
                elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);

                mlog(ML_HEARTBEAT,
                     "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
                     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
                     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
                     elapsed_msec);

                if (!kthread_should_stop() &&
                    elapsed_msec < reg->hr_timeout_ms) {
                        /* the kthread api has blocked signals for us so no
                         * need to record the return value. */
                        msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
                }
        }

        o2hb_disarm_write_timeout(reg);

        /* unclean stop is only used in very bad situation */
        for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
                o2hb_shutdown_slot(&reg->hr_slots[i]);

        /* Explicit down notification - avoid forcing the other nodes
         * to timeout on this region when we could just as easily
         * write a clear generation - thus indicating to them that
         * this node has left this region.
         */
        if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
                o2hb_prepare_block(reg, 0);
                ret = o2hb_issue_node_write(reg, &write_wc);
                if (ret == 0)
                        o2hb_wait_on_io(reg, &write_wc);
                else
                        mlog_errno(ret);
        }

        /* Unpin node */
        o2nm_undepend_this_node();

        mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");

        return 0;
}

#ifdef CONFIG_DEBUG_FS
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
        struct o2hb_debug_buf *db = inode->i_private;
        struct o2hb_region *reg;
        unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long lts;
        char *buf = NULL;
        int i = -1;
        int out = 0;

        /* max_nodes should be the largest bitmap we pass here */
        BUG_ON(sizeof(map) < db->db_size);

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                goto bail;

        switch (db->db_type) {
        case O2HB_DB_TYPE_LIVENODES:
        case O2HB_DB_TYPE_LIVEREGIONS:
        case O2HB_DB_TYPE_QUORUMREGIONS:
        case O2HB_DB_TYPE_FAILEDREGIONS:
                spin_lock(&o2hb_live_lock);
                memcpy(map, db->db_data, db->db_size);
                spin_unlock(&o2hb_live_lock);
                break;

        case O2HB_DB_TYPE_REGION_LIVENODES:
                spin_lock(&o2hb_live_lock);
                reg = (struct o2hb_region *)db->db_data;
                memcpy(map, reg->hr_live_node_bitmap, db->db_size);
                spin_unlock(&o2hb_live_lock);
                break;

        case O2HB_DB_TYPE_REGION_NUMBER:
                reg = (struct o2hb_region *)db->db_data;
                out += snprintf(buf + out, PAGE_SIZE - out, "%d\n",
                                reg->hr_region_num);
                goto done;

        case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
                reg = (struct o2hb_region *)db->db_data;
                lts = reg->hr_last_timeout_start;
                /* If 0, it has never been set before */
                if (lts)
                        lts = jiffies_to_msecs(jiffies - lts);
                out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
                goto done;

        case O2HB_DB_TYPE_REGION_PINNED:
                reg = (struct o2hb_region *)db->db_data;
                out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
                                !!reg->hr_item_pinned);
                goto done;

        default:
                goto done;
        }

        while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len)
                out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
        out += snprintf(buf + out, PAGE_SIZE - out, "\n");

done:
        i_size_write(inode, out);

        file->private_data = buf;

        return 0;
bail:
        return -ENOMEM;
}
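
/*
 * The whole debugfs output is rendered into a single PAGE_SIZE buffer at
 * open time; o2hb_debug_read() then just copies out of that buffer and
 * o2hb_debug_release() frees it.
 */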

static int o2hb_debug_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
                               size_t nbytes, loff_t *ppos)
{
        return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
                                       i_size_read(file->f_mapping->host));
}
#else
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
        return 0;
}
static int o2hb_debug_release(struct inode *inode, struct file *file)
{
        return 0;
}
static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
                               size_t nbytes, loff_t *ppos)
{
        return 0;
}
#endif  /* CONFIG_DEBUG_FS */

static const struct file_operations o2hb_debug_fops = {
        .open =		o2hb_debug_open,
        .release =	o2hb_debug_release,
        .read =		o2hb_debug_read,
        .llseek =	generic_file_llseek,
};

void o2hb_exit(void)
{
        kfree(o2hb_db_livenodes);
        kfree(o2hb_db_liveregions);
        kfree(o2hb_db_quorumregions);
        kfree(o2hb_db_failedregions);
        debugfs_remove(o2hb_debug_failedregions);
        debugfs_remove(o2hb_debug_quorumregions);
        debugfs_remove(o2hb_debug_liveregions);
        debugfs_remove(o2hb_debug_livenodes);
        debugfs_remove(o2hb_debug_dir);
}

static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
                                        struct o2hb_debug_buf **db, int db_len,
                                        int type, int size, int len, void *data)
{
        *db = kmalloc(db_len, GFP_KERNEL);
        if (!*db)
                return NULL;

        (*db)->db_type = type;
        (*db)->db_size = size;
        (*db)->db_len = len;
        (*db)->db_data = data;

        return debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db,
                                   &o2hb_debug_fops);
}
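
/*
 * Note: debugfs does not free the o2hb_debug_buf passed in here; the caller
 * stays responsible for it (the global instances are released in
 * o2hb_exit() above).
 */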
1333 | ||
1334 | static int o2hb_debug_init(void) | |
1335 | { | |
1336 | int ret = -ENOMEM; | |
1337 | ||
1338 | o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL); | |
1339 | if (!o2hb_debug_dir) { | |
1340 | mlog_errno(ret); | |
1341 | goto bail; | |
1342 | } | |
1343 | ||
1344 | o2hb_debug_livenodes = o2hb_debug_create(O2HB_DEBUG_LIVENODES, | |
1345 | o2hb_debug_dir, | |
1346 | &o2hb_db_livenodes, | |
1347 | sizeof(*o2hb_db_livenodes), | |
1348 | O2HB_DB_TYPE_LIVENODES, | |
1349 | sizeof(o2hb_live_node_bitmap), | |
1350 | O2NM_MAX_NODES, | |
1351 | o2hb_live_node_bitmap); | |
1352 | if (!o2hb_debug_livenodes) { | |
1353 | mlog_errno(ret); | |
1354 | goto bail; | |
1355 | } | |
a6de0136 SM |
1356 | |
1357 | o2hb_debug_liveregions = o2hb_debug_create(O2HB_DEBUG_LIVEREGIONS, | |
1358 | o2hb_debug_dir, | |
1359 | &o2hb_db_liveregions, | |
1360 | sizeof(*o2hb_db_liveregions), | |
1361 | O2HB_DB_TYPE_LIVEREGIONS, | |
1362 | sizeof(o2hb_live_region_bitmap), | |
1363 | O2NM_MAX_REGIONS, | |
1364 | o2hb_live_region_bitmap); | |
1365 | if (!o2hb_debug_liveregions) { | |
1366 | mlog_errno(ret); | |
1367 | goto bail; | |
1368 | } | |
1369 | ||
1370 | o2hb_debug_quorumregions = | |
1371 | o2hb_debug_create(O2HB_DEBUG_QUORUMREGIONS, | |
1372 | o2hb_debug_dir, | |
1373 | &o2hb_db_quorumregions, | |
1374 | sizeof(*o2hb_db_quorumregions), | |
1375 | O2HB_DB_TYPE_QUORUMREGIONS, | |
1376 | sizeof(o2hb_quorum_region_bitmap), | |
1377 | O2NM_MAX_REGIONS, | |
1378 | o2hb_quorum_region_bitmap); | |
1379 | if (!o2hb_debug_quorumregions) { | |
1380 | mlog_errno(ret); | |
1381 | goto bail; | |
1382 | } | |
1383 | ||
1384 | o2hb_debug_failedregions = | |
1385 | o2hb_debug_create(O2HB_DEBUG_FAILEDREGIONS, | |
1386 | o2hb_debug_dir, | |
1387 | &o2hb_db_failedregions, | |
1388 | sizeof(*o2hb_db_failedregions), | |
1389 | O2HB_DB_TYPE_FAILEDREGIONS, | |
1390 | sizeof(o2hb_failed_region_bitmap), | |
1391 | O2NM_MAX_REGIONS, | |
1392 | o2hb_failed_region_bitmap); | |
1393 | if (!o2hb_debug_failedregions) { | |
1394 | mlog_errno(ret); | |
1395 | goto bail; | |
1396 | } | |
1397 | ||
8ca8b0bb SM |
1398 | ret = 0; |
1399 | bail: | |
1400 | if (ret) | |
1401 | o2hb_exit(); | |
1402 | ||
1403 | return ret; | |
87d3d3f3 SM |
1404 | } |
1405 | ||
1406 | int o2hb_init(void) | |
a7f6a5fb MF |
1407 | { |
1408 | int i; | |
1409 | ||
1410 | for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++) | |
1411 | INIT_LIST_HEAD(&o2hb_callbacks[i].list); | |
1412 | ||
1413 | for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++) | |
1414 | INIT_LIST_HEAD(&o2hb_live_slots[i]); | |
1415 | ||
1416 | INIT_LIST_HEAD(&o2hb_node_events); | |
1417 | ||
1418 | memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap)); | |
536f0741 | 1419 | memset(o2hb_region_bitmap, 0, sizeof(o2hb_region_bitmap)); |
e7d656ba | 1420 | memset(o2hb_live_region_bitmap, 0, sizeof(o2hb_live_region_bitmap)); |
43182d2a | 1421 | memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap)); |
b1c5ebfb | 1422 | memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap)); |
87d3d3f3 | 1423 | |
58a3158a SM |
1424 | o2hb_dependent_users = 0; |
1425 | ||
8ca8b0bb | 1426 | return o2hb_debug_init(); |
a7f6a5fb MF |
1427 | } |
1428 | ||
1429 | /* if we're already in a callback then we're already serialized by the sem */ | |
1430 | static void o2hb_fill_node_map_from_callback(unsigned long *map, | |
1431 | unsigned bytes) | |
1432 | { | |
1433 | BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long))); | |
1434 | ||
1435 | memcpy(map, &o2hb_live_node_bitmap, bytes); | |
1436 | } | |
1437 | ||
1438 | /* | |
1439 | * get a map of all nodes that are heartbeating in any regions | |
1440 | */ | |
1441 | void o2hb_fill_node_map(unsigned long *map, unsigned bytes) | |
1442 | { | |
1443 | /* callers want to serialize this map and callbacks so that they | |
1444 | * can trust that they don't miss nodes coming to the party */ | |
1445 | down_read(&o2hb_callback_sem); | |
1446 | spin_lock(&o2hb_live_lock); | |
1447 | o2hb_fill_node_map_from_callback(map, bytes); | |
1448 | spin_unlock(&o2hb_live_lock); | |
1449 | up_read(&o2hb_callback_sem); | |
1450 | } | |
1451 | EXPORT_SYMBOL_GPL(o2hb_fill_node_map); | |
1452 | ||
1453 | /* | |
1454 | * heartbeat configfs bits. The heartbeat set is a default set under | |
1455 | * the cluster set in nodemanager.c. | |
1456 | */ | |
1457 | ||
1458 | static struct o2hb_region *to_o2hb_region(struct config_item *item) | |
1459 | { | |
1460 | return item ? container_of(item, struct o2hb_region, hr_item) : NULL; | |
1461 | } | |
1462 | ||
1463 | /* drop_item only drops its ref after killing the thread, so nothing |
1464 | * should be using the region anymore. This has to clean up any state |
1465 | * that attributes might have built up. */ |
1466 | static void o2hb_region_release(struct config_item *item) | |
1467 | { | |
1468 | int i; | |
1469 | struct page *page; | |
1470 | struct o2hb_region *reg = to_o2hb_region(item); | |
1471 | ||
d2eece37 SM |
1472 | mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name); |
1473 | ||
d787ab09 | 1474 | kfree(reg->hr_tmp_block); |
a7f6a5fb MF |
1475 | |
1476 | if (reg->hr_slot_data) { | |
1477 | for (i = 0; i < reg->hr_num_pages; i++) { | |
1478 | page = reg->hr_slot_data[i]; | |
1479 | if (page) | |
1480 | __free_page(page); | |
1481 | } | |
1482 | kfree(reg->hr_slot_data); | |
1483 | } | |
1484 | ||
1485 | if (reg->hr_bdev) | |
9a1c3542 | 1486 | blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE); |
a7f6a5fb | 1487 | |
d787ab09 | 1488 | kfree(reg->hr_slots); |
a7f6a5fb | 1489 | |
1f285305 SM |
1490 | kfree(reg->hr_db_regnum); |
1491 | kfree(reg->hr_db_livenodes); | |
1492 | debugfs_remove(reg->hr_debug_livenodes); | |
1493 | debugfs_remove(reg->hr_debug_regnum); | |
d4396eaf | 1494 | debugfs_remove(reg->hr_debug_elapsed_time); |
cb0586bd | 1495 | debugfs_remove(reg->hr_debug_pinned); |
1f285305 SM |
1496 | debugfs_remove(reg->hr_debug_dir); |
1497 | ||
a7f6a5fb MF |
1498 | spin_lock(&o2hb_live_lock); |
1499 | list_del(®->hr_all_item); | |
1500 | spin_unlock(&o2hb_live_lock); | |
1501 | ||
1502 | kfree(reg); | |
1503 | } | |
1504 | ||
1505 | static int o2hb_read_block_input(struct o2hb_region *reg, | |
1506 | const char *page, | |
1507 | size_t count, | |
1508 | unsigned long *ret_bytes, | |
1509 | unsigned int *ret_bits) | |
1510 | { | |
1511 | unsigned long bytes; | |
1512 | char *p = (char *)page; | |
1513 | ||
1514 | bytes = simple_strtoul(p, &p, 0); | |
1515 | if (!p || (*p && (*p != '\n'))) | |
1516 | return -EINVAL; | |
1517 | ||
1518 | /* Heartbeat and fs min / max block sizes are the same. */ | |
1519 | if (bytes > 4096 || bytes < 512) | |
1520 | return -ERANGE; | |
1521 | if (hweight16(bytes) != 1) | |
1522 | return -EINVAL; | |
1523 | ||
1524 | if (ret_bytes) | |
1525 | *ret_bytes = bytes; | |
1526 | if (ret_bits) | |
1527 | *ret_bits = ffs(bytes) - 1; | |
1528 | ||
1529 | return 0; | |
1530 | } | |
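
/*
 * A quick worked example of the parsing above: writing "512\n" yields
 * *ret_bytes = 512 and *ret_bits = ffs(512) - 1 = 9, and "4096" yields
 * 4096 and 12.  "1000" fails the power-of-two (hweight) test with
 * -EINVAL, and "8192" is outside the 512..4096 window, so -ERANGE.
 */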
1531 | ||
1532 | static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg, | |
1533 | char *page) | |
1534 | { | |
1535 | return sprintf(page, "%u\n", reg->hr_block_bytes); | |
1536 | } | |
1537 | ||
1538 | static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg, | |
1539 | const char *page, | |
1540 | size_t count) | |
1541 | { | |
1542 | int status; | |
1543 | unsigned long block_bytes; | |
1544 | unsigned int block_bits; | |
1545 | ||
1546 | if (reg->hr_bdev) | |
1547 | return -EINVAL; | |
1548 | ||
1549 | status = o2hb_read_block_input(reg, page, count, | |
1550 | &block_bytes, &block_bits); | |
1551 | if (status) | |
1552 | return status; | |
1553 | ||
1554 | reg->hr_block_bytes = (unsigned int)block_bytes; | |
1555 | reg->hr_block_bits = block_bits; | |
1556 | ||
1557 | return count; | |
1558 | } | |
1559 | ||
1560 | static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg, | |
1561 | char *page) | |
1562 | { | |
1563 | return sprintf(page, "%llu\n", reg->hr_start_block); | |
1564 | } | |
1565 | ||
1566 | static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg, | |
1567 | const char *page, | |
1568 | size_t count) | |
1569 | { | |
1570 | unsigned long long tmp; | |
1571 | char *p = (char *)page; | |
1572 | ||
1573 | if (reg->hr_bdev) | |
1574 | return -EINVAL; | |
1575 | ||
1576 | tmp = simple_strtoull(p, &p, 0); | |
1577 | if (!p || (*p && (*p != '\n'))) | |
1578 | return -EINVAL; | |
1579 | ||
1580 | reg->hr_start_block = tmp; | |
1581 | ||
1582 | return count; | |
1583 | } | |
1584 | ||
1585 | static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg, | |
1586 | char *page) | |
1587 | { | |
1588 | return sprintf(page, "%d\n", reg->hr_blocks); | |
1589 | } | |
1590 | ||
1591 | static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg, | |
1592 | const char *page, | |
1593 | size_t count) | |
1594 | { | |
1595 | unsigned long tmp; | |
1596 | char *p = (char *)page; | |
1597 | ||
1598 | if (reg->hr_bdev) | |
1599 | return -EINVAL; | |
1600 | ||
1601 | tmp = simple_strtoul(p, &p, 0); | |
1602 | if (!p || (*p && (*p != '\n'))) | |
1603 | return -EINVAL; | |
1604 | ||
1605 | if (tmp > O2NM_MAX_NODES || tmp == 0) | |
1606 | return -ERANGE; | |
1607 | ||
1608 | reg->hr_blocks = (unsigned int)tmp; | |
1609 | ||
1610 | return count; | |
1611 | } | |
1612 | ||
1613 | static ssize_t o2hb_region_dev_read(struct o2hb_region *reg, | |
1614 | char *page) | |
1615 | { | |
1616 | unsigned int ret = 0; | |
1617 | ||
1618 | if (reg->hr_bdev) | |
1619 | ret = sprintf(page, "%s\n", reg->hr_dev_name); | |
1620 | ||
1621 | return ret; | |
1622 | } | |
1623 | ||
1624 | static void o2hb_init_region_params(struct o2hb_region *reg) | |
1625 | { | |
1626 | reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits; | |
1627 | reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; | |
1628 | ||
1629 | mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n", | |
1630 | reg->hr_start_block, reg->hr_blocks); | |
1631 | mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n", | |
1632 | reg->hr_block_bytes, reg->hr_block_bits); | |
1633 | mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms); | |
1634 | mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold); | |
1635 | } | |
1636 | ||
1637 | static int o2hb_map_slot_data(struct o2hb_region *reg) | |
1638 | { | |
1639 | int i, j; | |
1640 | unsigned int last_slot; | |
1641 | unsigned int spp = reg->hr_slots_per_page; | |
1642 | struct page *page; | |
1643 | char *raw; | |
1644 | struct o2hb_disk_slot *slot; | |
1645 | ||
1646 | reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL); | |
1647 | if (reg->hr_tmp_block == NULL) { | |
1648 | mlog_errno(-ENOMEM); | |
1649 | return -ENOMEM; | |
1650 | } | |
1651 | ||
1652 | reg->hr_slots = kcalloc(reg->hr_blocks, | |
1653 | sizeof(struct o2hb_disk_slot), GFP_KERNEL); | |
1654 | if (reg->hr_slots == NULL) { | |
1655 | mlog_errno(-ENOMEM); | |
1656 | return -ENOMEM; | |
1657 | } | |
1658 | ||
1659 | for(i = 0; i < reg->hr_blocks; i++) { | |
1660 | slot = ®->hr_slots[i]; | |
1661 | slot->ds_node_num = i; | |
1662 | INIT_LIST_HEAD(&slot->ds_live_item); | |
1663 | slot->ds_raw_block = NULL; | |
1664 | } | |
1665 | ||
1666 | reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp; | |
1667 | mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks " | |
1668 | "at %u blocks per page\n", | |
1669 | reg->hr_num_pages, reg->hr_blocks, spp); | |
1670 | ||
1671 | reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *), | |
1672 | GFP_KERNEL); | |
1673 | if (!reg->hr_slot_data) { | |
1674 | mlog_errno(-ENOMEM); | |
1675 | return -ENOMEM; | |
1676 | } | |
1677 | ||
1678 | for(i = 0; i < reg->hr_num_pages; i++) { | |
1679 | page = alloc_page(GFP_KERNEL); | |
1680 | if (!page) { | |
1681 | mlog_errno(-ENOMEM); | |
1682 | return -ENOMEM; | |
1683 | } | |
1684 | ||
1685 | reg->hr_slot_data[i] = page; | |
1686 | ||
1687 | last_slot = i * spp; | |
1688 | raw = page_address(page); | |
1689 | for (j = 0; | |
1690 | (j < spp) && ((j + last_slot) < reg->hr_blocks); | |
1691 | j++) { | |
1692 | BUG_ON((j + last_slot) >= reg->hr_blocks); | |
1693 | ||
1694 | slot = ®->hr_slots[j + last_slot]; | |
1695 | slot->ds_raw_block = | |
1696 | (struct o2hb_disk_heartbeat_block *) raw; | |
1697 | ||
1698 | raw += reg->hr_block_bytes; | |
1699 | } | |
1700 | } | |
1701 | ||
1702 | return 0; | |
1703 | } | |
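
/*
 * A worked example of the page math above, assuming 4096-byte pages and
 * 512-byte heartbeat blocks: hr_slots_per_page = 4096 >> 9 = 8, so a
 * region sized for O2NM_MAX_NODES (255) slots needs
 * hr_num_pages = (255 + 8 - 1) / 8 = 32 pages, with only 7 slots mapped
 * out of the final page.
 */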
1704 | ||
1705 | /* Read in all the slots available and populate the tracking | |
1706 | * structures so that we can start with a baseline idea of what's | |
1707 | * there. */ | |
1708 | static int o2hb_populate_slot_data(struct o2hb_region *reg) | |
1709 | { | |
1710 | int ret, i; | |
1711 | struct o2hb_disk_slot *slot; | |
1712 | struct o2hb_disk_heartbeat_block *hb_block; | |
1713 | ||
a7f6a5fb MF |
1714 | ret = o2hb_read_slots(reg, reg->hr_blocks); |
1715 | if (ret) { | |
1716 | mlog_errno(ret); | |
1717 | goto out; | |
1718 | } | |
1719 | ||
1720 | /* We only want to get an idea of the values initially in each | |
1721 | * slot, so we do no verification - o2hb_check_slot will | |
1722 | * actually determine if each configured slot is valid and | |
1723 | * whether any values have changed. */ | |
1724 | for(i = 0; i < reg->hr_blocks; i++) { | |
1725 | slot = ®->hr_slots[i]; | |
1726 | hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block; | |
1727 | ||
1728 | /* Only fill the values that o2hb_check_slot uses to | |
1729 | * determine changing slots */ | |
1730 | slot->ds_last_time = le64_to_cpu(hb_block->hb_seq); | |
1731 | slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation); | |
1732 | } | |
1733 | ||
1734 | out: | |
a7f6a5fb MF |
1735 | return ret; |
1736 | } | |
1737 | ||
1738 | /* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */ | |
1739 | static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |
1740 | const char *page, | |
1741 | size_t count) | |
1742 | { | |
e6c352db | 1743 | struct task_struct *hb_task; |
a7f6a5fb MF |
1744 | long fd; |
1745 | int sectsize; | |
1746 | char *p = (char *)page; | |
2903ff01 AV |
1747 | struct fd f; |
1748 | struct inode *inode; | |
a7f6a5fb | 1749 | ssize_t ret = -EINVAL; |
76d9fc29 | 1750 | int live_threshold; |
a7f6a5fb MF |
1751 | |
1752 | if (reg->hr_bdev) | |
1753 | goto out; | |
1754 | ||
1755 | /* We can't heartbeat without having had our node number | |
1756 | * configured yet. */ | |
1757 | if (o2nm_this_node() == O2NM_MAX_NODES) | |
1758 | goto out; | |
1759 | ||
1760 | fd = simple_strtol(p, &p, 0); | |
1761 | if (!p || (*p && (*p != '\n'))) | |
1762 | goto out; | |
1763 | ||
1764 | if (fd < 0 || fd >= INT_MAX) | |
1765 | goto out; | |
1766 | ||
2903ff01 AV |
1767 | f = fdget(fd); |
1768 | if (f.file == NULL) | |
a7f6a5fb MF |
1769 | goto out; |
1770 | ||
1771 | if (reg->hr_blocks == 0 || reg->hr_start_block == 0 || | |
1772 | reg->hr_block_bytes == 0) | |
2903ff01 | 1773 | goto out2; |
a7f6a5fb | 1774 | |
2903ff01 | 1775 | inode = igrab(f.file->f_mapping->host); |
a7f6a5fb | 1776 | if (inode == NULL) |
2903ff01 | 1777 | goto out2; |
a7f6a5fb MF |
1778 | |
1779 | if (!S_ISBLK(inode->i_mode)) | |
2903ff01 | 1780 | goto out3; |
a7f6a5fb | 1781 | |
2903ff01 | 1782 | reg->hr_bdev = I_BDEV(f.file->f_mapping->host); |
e525fd89 | 1783 | ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL); |
a7f6a5fb MF |
1784 | if (ret) { |
1785 | reg->hr_bdev = NULL; | |
2903ff01 | 1786 | goto out3; |
a7f6a5fb MF |
1787 | } |
1788 | inode = NULL; | |
1789 | ||
1790 | bdevname(reg->hr_bdev, reg->hr_dev_name); | |
1791 | ||
e1defc4f | 1792 | sectsize = bdev_logical_block_size(reg->hr_bdev); |
a7f6a5fb MF |
1793 | if (sectsize != reg->hr_block_bytes) { |
1794 | mlog(ML_ERROR, | |
1795 | "blocksize %u incorrect for device, expected %d", | |
1796 | reg->hr_block_bytes, sectsize); | |
1797 | ret = -EINVAL; | |
2903ff01 | 1798 | goto out3; |
a7f6a5fb MF |
1799 | } |
1800 | ||
1801 | o2hb_init_region_params(reg); | |
1802 | ||
1803 | /* Generation of zero is invalid */ | |
1804 | do { | |
1805 | get_random_bytes(®->hr_generation, | |
1806 | sizeof(reg->hr_generation)); | |
1807 | } while (reg->hr_generation == 0); | |
1808 | ||
1809 | ret = o2hb_map_slot_data(reg); | |
1810 | if (ret) { | |
1811 | mlog_errno(ret); | |
2903ff01 | 1812 | goto out3; |
a7f6a5fb MF |
1813 | } |
1814 | ||
1815 | ret = o2hb_populate_slot_data(reg); | |
1816 | if (ret) { | |
1817 | mlog_errno(ret); | |
2903ff01 | 1818 | goto out3; |
a7f6a5fb MF |
1819 | } |
1820 | ||
c4028958 | 1821 | INIT_DELAYED_WORK(®->hr_write_timeout_work, o2hb_write_timeout); |
a7f6a5fb MF |
1822 | |
1823 | /* | |
1824 | * A node is considered live after it has beat LIVE_THRESHOLD | |
1825 | * times. We're not steady until we've given them a chance | |
1826 | * _after_ our first read. | |
76d9fc29 SM |
1827 | * The default threshold is the bare minimum so as to limit the |
1828 | * delay during mounts. For global heartbeat, the threshold is |
1829 | * doubled for the first region. |
a7f6a5fb | 1830 | */ |
76d9fc29 SM |
1831 | live_threshold = O2HB_LIVE_THRESHOLD; |
1832 | if (o2hb_global_heartbeat_active()) { | |
1833 | spin_lock(&o2hb_live_lock); | |
1834 | if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) | |
1835 | live_threshold <<= 1; | |
1836 | spin_unlock(&o2hb_live_lock); | |
1837 | } | |
d2eece37 SM |
1838 | ++live_threshold; |
1839 | atomic_set(®->hr_steady_iterations, live_threshold); | |
1840 | /* unsteady_iterations is double the steady_iterations */ | |
1841 | atomic_set(®->hr_unsteady_iterations, (live_threshold << 1)); | |
a7f6a5fb | 1842 | |
e6c352db JB |
1843 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", |
1844 | reg->hr_item.ci_name); | |
1845 | if (IS_ERR(hb_task)) { | |
1846 | ret = PTR_ERR(hb_task); | |
a7f6a5fb | 1847 | mlog_errno(ret); |
2903ff01 | 1848 | goto out3; |
a7f6a5fb MF |
1849 | } |
1850 | ||
e6c352db JB |
1851 | spin_lock(&o2hb_live_lock); |
1852 | reg->hr_task = hb_task; | |
1853 | spin_unlock(&o2hb_live_lock); | |
1854 | ||
a7f6a5fb MF |
1855 | ret = wait_event_interruptible(o2hb_steady_queue, |
1856 | atomic_read(®->hr_steady_iterations) == 0); | |
1857 | if (ret) { | |
d2eece37 SM |
1858 | atomic_set(®->hr_steady_iterations, 0); |
1859 | reg->hr_aborted_start = 1; | |
1860 | } | |
e6c352db | 1861 | |
d2eece37 SM |
1862 | if (reg->hr_aborted_start) { |
1863 | ret = -EIO; | |
2903ff01 | 1864 | goto out3; |
a7f6a5fb MF |
1865 | } |
1866 | ||
e6df3a66 JB |
1867 | /* Ok, we were woken. Make sure it wasn't by drop_item() */ |
1868 | spin_lock(&o2hb_live_lock); | |
1869 | hb_task = reg->hr_task; | |
e7d656ba SM |
1870 | if (o2hb_global_heartbeat_active()) |
1871 | set_bit(reg->hr_region_num, o2hb_live_region_bitmap); | |
e6df3a66 JB |
1872 | spin_unlock(&o2hb_live_lock); |
1873 | ||
1874 | if (hb_task) | |
1875 | ret = count; | |
1876 | else | |
1877 | ret = -EIO; | |
1878 | ||
18c50cb0 | 1879 | if (hb_task && o2hb_global_heartbeat_active()) |
d2eece37 SM |
1880 | printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n", |
1881 | config_item_name(®->hr_item), reg->hr_dev_name); | |
18c50cb0 | 1882 | |
2903ff01 AV |
1883 | out3: |
1884 | iput(inode); | |
1885 | out2: | |
1886 | fdput(f); | |
a7f6a5fb | 1887 | out: |
a7f6a5fb MF |
1888 | if (ret < 0) { |
1889 | if (reg->hr_bdev) { | |
9a1c3542 | 1890 | blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE); |
a7f6a5fb MF |
1891 | reg->hr_bdev = NULL; |
1892 | } | |
1893 | } | |
1894 | return ret; | |
1895 | } | |
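
/*
 * A rough sketch of the userspace side of this commit point (o2cb-style
 * tooling; the mount point, device and values are illustrative and
 * assume configfs is mounted at /sys/kernel/config). The node number
 * must already be configured, and block_bytes, start_block and blocks
 * must be written before dev:
 *
 *	fd = open("/dev/sdb1", O_RDWR);
 *	write "512"            to <region>/block_bytes
 *	write the start block  to <region>/start_block
 *	write the slot count   to <region>/blocks
 *	snprintf(buf, sizeof(buf), "%d", fd);
 *	write buf              to <region>/dev
 *
 * where <region> is /sys/kernel/config/cluster/<name>/heartbeat/<uuid>.
 * The final write only returns once hr_steady_iterations reaches zero
 * (or the start is aborted), which is what makes it act as the commit.
 */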
1896 | ||
92efc152 ZW |
1897 | static ssize_t o2hb_region_pid_read(struct o2hb_region *reg, |
1898 | char *page) | |
1899 | { | |
e6c352db JB |
1900 | pid_t pid = 0; |
1901 | ||
1902 | spin_lock(&o2hb_live_lock); | |
1903 | if (reg->hr_task) | |
ba25f9dc | 1904 | pid = task_pid_nr(reg->hr_task); |
e6c352db JB |
1905 | spin_unlock(&o2hb_live_lock); |
1906 | ||
1907 | if (!pid) | |
92efc152 ZW |
1908 | return 0; |
1909 | ||
e6c352db | 1910 | return sprintf(page, "%u\n", pid); |
92efc152 ZW |
1911 | } |
1912 | ||
a7f6a5fb MF |
1913 | struct o2hb_region_attribute { |
1914 | struct configfs_attribute attr; | |
1915 | ssize_t (*show)(struct o2hb_region *, char *); | |
1916 | ssize_t (*store)(struct o2hb_region *, const char *, size_t); | |
1917 | }; | |
1918 | ||
1919 | static struct o2hb_region_attribute o2hb_region_attr_block_bytes = { | |
1920 | .attr = { .ca_owner = THIS_MODULE, | |
1921 | .ca_name = "block_bytes", | |
1922 | .ca_mode = S_IRUGO | S_IWUSR }, | |
1923 | .show = o2hb_region_block_bytes_read, | |
1924 | .store = o2hb_region_block_bytes_write, | |
1925 | }; | |
1926 | ||
1927 | static struct o2hb_region_attribute o2hb_region_attr_start_block = { | |
1928 | .attr = { .ca_owner = THIS_MODULE, | |
1929 | .ca_name = "start_block", | |
1930 | .ca_mode = S_IRUGO | S_IWUSR }, | |
1931 | .show = o2hb_region_start_block_read, | |
1932 | .store = o2hb_region_start_block_write, | |
1933 | }; | |
1934 | ||
1935 | static struct o2hb_region_attribute o2hb_region_attr_blocks = { | |
1936 | .attr = { .ca_owner = THIS_MODULE, | |
1937 | .ca_name = "blocks", | |
1938 | .ca_mode = S_IRUGO | S_IWUSR }, | |
1939 | .show = o2hb_region_blocks_read, | |
1940 | .store = o2hb_region_blocks_write, | |
1941 | }; | |
1942 | ||
1943 | static struct o2hb_region_attribute o2hb_region_attr_dev = { | |
1944 | .attr = { .ca_owner = THIS_MODULE, | |
1945 | .ca_name = "dev", | |
1946 | .ca_mode = S_IRUGO | S_IWUSR }, | |
1947 | .show = o2hb_region_dev_read, | |
1948 | .store = o2hb_region_dev_write, | |
1949 | }; | |
1950 | ||
92efc152 ZW |
1951 | static struct o2hb_region_attribute o2hb_region_attr_pid = { |
1952 | .attr = { .ca_owner = THIS_MODULE, | |
1953 | .ca_name = "pid", | |
1954 | .ca_mode = S_IRUGO | S_IRUSR }, | |
1955 | .show = o2hb_region_pid_read, | |
1956 | }; | |
1957 | ||
a7f6a5fb MF |
1958 | static struct configfs_attribute *o2hb_region_attrs[] = { |
1959 | &o2hb_region_attr_block_bytes.attr, | |
1960 | &o2hb_region_attr_start_block.attr, | |
1961 | &o2hb_region_attr_blocks.attr, | |
1962 | &o2hb_region_attr_dev.attr, | |
92efc152 | 1963 | &o2hb_region_attr_pid.attr, |
a7f6a5fb MF |
1964 | NULL, |
1965 | }; | |
1966 | ||
1967 | static ssize_t o2hb_region_show(struct config_item *item, | |
1968 | struct configfs_attribute *attr, | |
1969 | char *page) | |
1970 | { | |
1971 | struct o2hb_region *reg = to_o2hb_region(item); | |
1972 | struct o2hb_region_attribute *o2hb_region_attr = | |
1973 | container_of(attr, struct o2hb_region_attribute, attr); | |
1974 | ssize_t ret = 0; | |
1975 | ||
1976 | if (o2hb_region_attr->show) | |
1977 | ret = o2hb_region_attr->show(reg, page); | |
1978 | return ret; | |
1979 | } | |
1980 | ||
1981 | static ssize_t o2hb_region_store(struct config_item *item, | |
1982 | struct configfs_attribute *attr, | |
1983 | const char *page, size_t count) | |
1984 | { | |
1985 | struct o2hb_region *reg = to_o2hb_region(item); | |
1986 | struct o2hb_region_attribute *o2hb_region_attr = | |
1987 | container_of(attr, struct o2hb_region_attribute, attr); | |
1988 | ssize_t ret = -EINVAL; | |
1989 | ||
1990 | if (o2hb_region_attr->store) | |
1991 | ret = o2hb_region_attr->store(reg, page, count); | |
1992 | return ret; | |
1993 | } | |
1994 | ||
1995 | static struct configfs_item_operations o2hb_region_item_ops = { | |
1996 | .release = o2hb_region_release, | |
1997 | .show_attribute = o2hb_region_show, | |
1998 | .store_attribute = o2hb_region_store, | |
1999 | }; | |
2000 | ||
2001 | static struct config_item_type o2hb_region_type = { | |
2002 | .ct_item_ops = &o2hb_region_item_ops, | |
2003 | .ct_attrs = o2hb_region_attrs, | |
2004 | .ct_owner = THIS_MODULE, | |
2005 | }; | |
2006 | ||
2007 | /* heartbeat set */ | |
2008 | ||
2009 | struct o2hb_heartbeat_group { | |
2010 | struct config_group hs_group; | |
2011 | /* some stuff? */ | |
2012 | }; | |
2013 | ||
2014 | static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group) | |
2015 | { | |
2016 | return group ? | |
2017 | container_of(group, struct o2hb_heartbeat_group, hs_group) | |
2018 | : NULL; | |
2019 | } | |
2020 | ||
1f285305 SM |
2021 | static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir) |
2022 | { | |
2023 | int ret = -ENOMEM; | |
2024 | ||
2025 | reg->hr_debug_dir = | |
2026 | debugfs_create_dir(config_item_name(®->hr_item), dir); | |
2027 | if (!reg->hr_debug_dir) { | |
2028 | mlog_errno(ret); | |
2029 | goto bail; | |
2030 | } | |
2031 | ||
2032 | reg->hr_debug_livenodes = | |
2033 | o2hb_debug_create(O2HB_DEBUG_LIVENODES, | |
2034 | reg->hr_debug_dir, | |
2035 | &(reg->hr_db_livenodes), | |
2036 | sizeof(*(reg->hr_db_livenodes)), | |
2037 | O2HB_DB_TYPE_REGION_LIVENODES, | |
2038 | sizeof(reg->hr_live_node_bitmap), | |
2039 | O2NM_MAX_NODES, reg); | |
2040 | if (!reg->hr_debug_livenodes) { | |
2041 | mlog_errno(ret); | |
2042 | goto bail; | |
2043 | } | |
2044 | ||
2045 | reg->hr_debug_regnum = | |
2046 | o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, | |
2047 | reg->hr_debug_dir, | |
2048 | &(reg->hr_db_regnum), | |
2049 | sizeof(*(reg->hr_db_regnum)), | |
2050 | O2HB_DB_TYPE_REGION_NUMBER, | |
2051 | 0, O2NM_MAX_NODES, reg); | |
2052 | if (!reg->hr_debug_regnum) { | |
2053 | mlog_errno(ret); | |
2054 | goto bail; | |
2055 | } | |
2056 | ||
43695d09 SM |
2057 | reg->hr_debug_elapsed_time = |
2058 | o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, | |
2059 | reg->hr_debug_dir, | |
2060 | &(reg->hr_db_elapsed_time), | |
2061 | sizeof(*(reg->hr_db_elapsed_time)), | |
2062 | O2HB_DB_TYPE_REGION_ELAPSED_TIME, | |
2063 | 0, 0, reg); | |
2064 | if (!reg->hr_debug_elapsed_time) { | |
2065 | mlog_errno(ret); | |
2066 | goto bail; | |
2067 | } | |
2068 | ||
cb0586bd SM |
2069 | reg->hr_debug_pinned = |
2070 | o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, | |
2071 | reg->hr_debug_dir, | |
2072 | &(reg->hr_db_pinned), | |
2073 | sizeof(*(reg->hr_db_pinned)), | |
2074 | O2HB_DB_TYPE_REGION_PINNED, | |
2075 | 0, 0, reg); | |
2076 | if (!reg->hr_debug_pinned) { | |
2077 | mlog_errno(ret); | |
2078 | goto bail; | |
2079 | } | |
2080 | ||
1f285305 SM |
2081 | ret = 0; |
2082 | bail: | |
2083 | return ret; | |
2084 | } | |
2085 | ||
f89ab861 JB |
2086 | static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group, |
2087 | const char *name) | |
a7f6a5fb MF |
2088 | { |
2089 | struct o2hb_region *reg = NULL; | |
1f285305 | 2090 | int ret; |
a7f6a5fb | 2091 | |
cd861280 | 2092 | reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL); |
f89ab861 | 2093 | if (reg == NULL) |
a6795e9e | 2094 | return ERR_PTR(-ENOMEM); |
a7f6a5fb | 2095 | |
1cf257f5 JS |
2096 | if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) { |
2097 | ret = -ENAMETOOLONG; | |
2098 | goto free; | |
2099 | } | |
b3c85c4c | 2100 | |
a7f6a5fb | 2101 | spin_lock(&o2hb_live_lock); |
536f0741 SM |
2102 | reg->hr_region_num = 0; |
2103 | if (o2hb_global_heartbeat_active()) { | |
2104 | reg->hr_region_num = find_first_zero_bit(o2hb_region_bitmap, | |
2105 | O2NM_MAX_REGIONS); | |
2106 | if (reg->hr_region_num >= O2NM_MAX_REGIONS) { | |
2107 | spin_unlock(&o2hb_live_lock); | |
1cf257f5 JS |
2108 | ret = -EFBIG; |
2109 | goto free; | |
536f0741 SM |
2110 | } |
2111 | set_bit(reg->hr_region_num, o2hb_region_bitmap); | |
2112 | } | |
a7f6a5fb MF |
2113 | list_add_tail(®->hr_all_item, &o2hb_all_regions); |
2114 | spin_unlock(&o2hb_live_lock); | |
a7f6a5fb | 2115 | |
536f0741 SM |
2116 | config_item_init_type_name(®->hr_item, name, &o2hb_region_type); |
2117 | ||
1f285305 SM |
2118 | ret = o2hb_debug_region_init(reg, o2hb_debug_dir); |
2119 | if (ret) { | |
2120 | config_item_put(®->hr_item); | |
1cf257f5 | 2121 | goto free; |
1f285305 SM |
2122 | } |
2123 | ||
a6795e9e | 2124 | return ®->hr_item; |
1cf257f5 JS |
2125 | free: |
2126 | kfree(reg); | |
2127 | return ERR_PTR(ret); | |
a7f6a5fb MF |
2128 | } |
2129 | ||
2130 | static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |
2131 | struct config_item *item) | |
2132 | { | |
e6c352db | 2133 | struct task_struct *hb_task; |
a7f6a5fb | 2134 | struct o2hb_region *reg = to_o2hb_region(item); |
58a3158a | 2135 | int quorum_region = 0; |
a7f6a5fb MF |
2136 | |
2137 | /* stop the thread when the user removes the region dir */ | |
e6c352db JB |
2138 | spin_lock(&o2hb_live_lock); |
2139 | hb_task = reg->hr_task; | |
2140 | reg->hr_task = NULL; | |
58a3158a | 2141 | reg->hr_item_dropped = 1; |
e6c352db JB |
2142 | spin_unlock(&o2hb_live_lock); |
2143 | ||
2144 | if (hb_task) | |
2145 | kthread_stop(hb_task); | |
a7f6a5fb | 2146 | |
d2eece37 SM |
2147 | if (o2hb_global_heartbeat_active()) { |
2148 | spin_lock(&o2hb_live_lock); | |
2149 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); | |
2150 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); | |
2151 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | |
2152 | quorum_region = 1; | |
2153 | clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | |
2154 | spin_unlock(&o2hb_live_lock); | |
2155 | printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n", | |
2156 | ((atomic_read(®->hr_steady_iterations) == 0) ? | |
2157 | "stopped" : "start aborted"), config_item_name(item), | |
2158 | reg->hr_dev_name); | |
2159 | } | |
2160 | ||
e6df3a66 JB |
2161 | /* |
2162 | * If we're racing a dev_write(), we need to wake it; it will |
2163 | * check reg->hr_task. |
2164 | */ | |
2165 | if (atomic_read(®->hr_steady_iterations) != 0) { | |
d2eece37 | 2166 | reg->hr_aborted_start = 1; |
e6df3a66 JB |
2167 | atomic_set(®->hr_steady_iterations, 0); |
2168 | wake_up(&o2hb_steady_queue); | |
2169 | } | |
2170 | ||
a7f6a5fb | 2171 | config_item_put(item); |
58a3158a SM |
2172 | |
2173 | if (!o2hb_global_heartbeat_active() || !quorum_region) | |
2174 | return; | |
2175 | ||
2176 | /* | |
2177 | * If global heartbeat is active and there are dependent users, |
2178 | * pin all regions if quorum region count <= CUT_OFF | |
2179 | */ | |
2180 | spin_lock(&o2hb_live_lock); | |
2181 | ||
2182 | if (!o2hb_dependent_users) | |
2183 | goto unlock; | |
2184 | ||
2185 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | |
2186 | O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) | |
2187 | o2hb_region_pin(NULL); | |
2188 | ||
2189 | unlock: | |
2190 | spin_unlock(&o2hb_live_lock); | |
a7f6a5fb MF |
2191 | } |
2192 | ||
2193 | struct o2hb_heartbeat_group_attribute { | |
2194 | struct configfs_attribute attr; | |
2195 | ssize_t (*show)(struct o2hb_heartbeat_group *, char *); | |
2196 | ssize_t (*store)(struct o2hb_heartbeat_group *, const char *, size_t); | |
2197 | }; | |
2198 | ||
2199 | static ssize_t o2hb_heartbeat_group_show(struct config_item *item, | |
2200 | struct configfs_attribute *attr, | |
2201 | char *page) | |
2202 | { | |
2203 | struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item)); | |
2204 | struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr = | |
2205 | container_of(attr, struct o2hb_heartbeat_group_attribute, attr); | |
2206 | ssize_t ret = 0; | |
2207 | ||
2208 | if (o2hb_heartbeat_group_attr->show) | |
2209 | ret = o2hb_heartbeat_group_attr->show(reg, page); | |
2210 | return ret; | |
2211 | } | |
2212 | ||
2213 | static ssize_t o2hb_heartbeat_group_store(struct config_item *item, | |
2214 | struct configfs_attribute *attr, | |
2215 | const char *page, size_t count) | |
2216 | { | |
2217 | struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item)); | |
2218 | struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr = | |
2219 | container_of(attr, struct o2hb_heartbeat_group_attribute, attr); | |
2220 | ssize_t ret = -EINVAL; | |
2221 | ||
2222 | if (o2hb_heartbeat_group_attr->store) | |
2223 | ret = o2hb_heartbeat_group_attr->store(reg, page, count); | |
2224 | return ret; | |
2225 | } | |
2226 | ||
2227 | static ssize_t o2hb_heartbeat_group_threshold_show(struct o2hb_heartbeat_group *group, | |
2228 | char *page) | |
2229 | { | |
2230 | return sprintf(page, "%u\n", o2hb_dead_threshold); | |
2231 | } | |
2232 | ||
2233 | static ssize_t o2hb_heartbeat_group_threshold_store(struct o2hb_heartbeat_group *group, | |
2234 | const char *page, | |
2235 | size_t count) | |
2236 | { | |
2237 | unsigned long tmp; | |
2238 | char *p = (char *)page; | |
2239 | ||
2240 | tmp = simple_strtoul(p, &p, 10); | |
2241 | if (!p || (*p && (*p != '\n'))) | |
2242 | return -EINVAL; | |
2243 | ||
2244 | /* this will validate ranges for us. */ | |
2245 | o2hb_dead_threshold_set((unsigned int) tmp); | |
2246 | ||
2247 | return count; | |
2248 | } | |
2249 | ||
54b5187b SM |
2250 | static |
2251 | ssize_t o2hb_heartbeat_group_mode_show(struct o2hb_heartbeat_group *group, | |
2252 | char *page) | |
2253 | { | |
2254 | return sprintf(page, "%s\n", | |
2255 | o2hb_heartbeat_mode_desc[o2hb_heartbeat_mode]); | |
2256 | } | |
2257 | ||
2258 | static | |
2259 | ssize_t o2hb_heartbeat_group_mode_store(struct o2hb_heartbeat_group *group, | |
2260 | const char *page, size_t count) | |
2261 | { | |
2262 | unsigned int i; | |
2263 | int ret; | |
2264 | size_t len; | |
2265 | ||
2266 | len = (page[count - 1] == '\n') ? count - 1 : count; | |
2267 | if (!len) | |
2268 | return -EINVAL; | |
2269 | ||
2270 | for (i = 0; i < O2HB_HEARTBEAT_NUM_MODES; ++i) { | |
2271 | if (strnicmp(page, o2hb_heartbeat_mode_desc[i], len)) | |
2272 | continue; | |
2273 | ||
2274 | ret = o2hb_global_hearbeat_mode_set(i); | |
2275 | if (!ret) | |
18c50cb0 | 2276 | printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n", |
54b5187b SM |
2277 | o2hb_heartbeat_mode_desc[i]); |
2278 | return count; | |
2279 | } | |
2280 | ||
2281 | return -EINVAL; | |
2282 | ||
2283 | } | |
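
/*
 * For example (illustrative path, assuming configfs is mounted at
 * /sys/kernel/config and the mode descriptions are "local"/"global"):
 *
 *	echo global > /sys/kernel/config/cluster/<name>/heartbeat/mode
 *
 * The match above is case-insensitive and the trailing newline from a
 * shell echo is stripped before comparison.
 */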
2284 | ||
a7f6a5fb MF |
2285 | static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_threshold = { |
2286 | .attr = { .ca_owner = THIS_MODULE, | |
2287 | .ca_name = "dead_threshold", | |
2288 | .ca_mode = S_IRUGO | S_IWUSR }, | |
2289 | .show = o2hb_heartbeat_group_threshold_show, | |
2290 | .store = o2hb_heartbeat_group_threshold_store, | |
2291 | }; | |
2292 | ||
54b5187b SM |
2293 | static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_mode = { |
2294 | .attr = { .ca_owner = THIS_MODULE, | |
2295 | .ca_name = "mode", | |
2296 | .ca_mode = S_IRUGO | S_IWUSR }, | |
2297 | .show = o2hb_heartbeat_group_mode_show, | |
2298 | .store = o2hb_heartbeat_group_mode_store, | |
2299 | }; | |
2300 | ||
a7f6a5fb MF |
2301 | static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { |
2302 | &o2hb_heartbeat_group_attr_threshold.attr, | |
54b5187b | 2303 | &o2hb_heartbeat_group_attr_mode.attr, |
a7f6a5fb MF |
2304 | NULL, |
2305 | }; | |
2306 | ||
2307 | static struct configfs_item_operations o2hb_hearbeat_group_item_ops = { | |
2308 | .show_attribute = o2hb_heartbeat_group_show, | |
2309 | .store_attribute = o2hb_heartbeat_group_store, | |
2310 | }; | |
2311 | ||
2312 | static struct configfs_group_operations o2hb_heartbeat_group_group_ops = { | |
2313 | .make_item = o2hb_heartbeat_group_make_item, | |
2314 | .drop_item = o2hb_heartbeat_group_drop_item, | |
2315 | }; | |
2316 | ||
2317 | static struct config_item_type o2hb_heartbeat_group_type = { | |
2318 | .ct_group_ops = &o2hb_heartbeat_group_group_ops, | |
2319 | .ct_item_ops = &o2hb_hearbeat_group_item_ops, | |
2320 | .ct_attrs = o2hb_heartbeat_group_attrs, | |
2321 | .ct_owner = THIS_MODULE, | |
2322 | }; | |
2323 | ||
2324 | /* this is just here to avoid touching group in heartbeat.h which the | |
2325 | * entire damn world #includes */ | |
2326 | struct config_group *o2hb_alloc_hb_set(void) | |
2327 | { | |
2328 | struct o2hb_heartbeat_group *hs = NULL; | |
2329 | struct config_group *ret = NULL; | |
2330 | ||
cd861280 | 2331 | hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL); |
a7f6a5fb MF |
2332 | if (hs == NULL) |
2333 | goto out; | |
2334 | ||
2335 | config_group_init_type_name(&hs->hs_group, "heartbeat", | |
2336 | &o2hb_heartbeat_group_type); | |
2337 | ||
2338 | ret = &hs->hs_group; | |
2339 | out: | |
2340 | if (ret == NULL) | |
2341 | kfree(hs); | |
2342 | return ret; | |
2343 | } | |
2344 | ||
2345 | void o2hb_free_hb_set(struct config_group *group) | |
2346 | { | |
2347 | struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group); | |
2348 | kfree(hs); | |
2349 | } | |
2350 | ||
25985edc | 2351 | /* hb callback registration and issuing */ |
a7f6a5fb MF |
2352 | |
2353 | static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type) | |
2354 | { | |
2355 | if (type == O2HB_NUM_CB) | |
2356 | return ERR_PTR(-EINVAL); | |
2357 | ||
2358 | return &o2hb_callbacks[type]; | |
2359 | } | |
2360 | ||
2361 | void o2hb_setup_callback(struct o2hb_callback_func *hc, | |
2362 | enum o2hb_callback_type type, | |
2363 | o2hb_cb_func *func, | |
2364 | void *data, | |
2365 | int priority) | |
2366 | { | |
2367 | INIT_LIST_HEAD(&hc->hc_item); | |
2368 | hc->hc_func = func; | |
2369 | hc->hc_data = data; | |
2370 | hc->hc_priority = priority; | |
2371 | hc->hc_type = type; | |
2372 | hc->hc_magic = O2HB_CB_MAGIC; | |
2373 | } | |
2374 | EXPORT_SYMBOL_GPL(o2hb_setup_callback); | |
2375 | ||
58a3158a SM |
2376 | /* |
2377 | * In local heartbeat mode, region_uuid passed matches the dlm domain name. | |
2378 | * In global heartbeat mode, region_uuid passed is NULL. | |
2379 | * | |
2380 | * In local, we only pin the matching region. In global we pin all the active | |
2381 | * regions. | |
2382 | */ | |
2383 | static int o2hb_region_pin(const char *region_uuid) | |
14829422 | 2384 | { |
58a3158a SM |
2385 | int ret = 0, found = 0; |
2386 | struct o2hb_region *reg; | |
2387 | char *uuid; | |
14829422 JB |
2388 | |
2389 | assert_spin_locked(&o2hb_live_lock); | |
2390 | ||
58a3158a SM |
2391 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2392 | uuid = config_item_name(®->hr_item); | |
2393 | ||
2394 | /* local heartbeat */ | |
2395 | if (region_uuid) { | |
2396 | if (strcmp(region_uuid, uuid)) | |
2397 | continue; | |
2398 | found = 1; | |
14829422 | 2399 | } |
58a3158a SM |
2400 | |
2401 | if (reg->hr_item_pinned || reg->hr_item_dropped) | |
2402 | goto skip_pin; | |
2403 | ||
2404 | /* Ignore ENOENT only for local hb (userdlm domain) */ | |
2405 | ret = o2nm_depend_item(®->hr_item); | |
2406 | if (!ret) { | |
2407 | mlog(ML_CLUSTER, "Pin region %s\n", uuid); | |
2408 | reg->hr_item_pinned = 1; | |
2409 | } else { | |
2410 | if (ret == -ENOENT && found) | |
2411 | ret = 0; | |
2412 | else { | |
2413 | mlog(ML_ERROR, "Pin region %s fails with %d\n", | |
2414 | uuid, ret); | |
2415 | break; | |
2416 | } | |
14829422 | 2417 | } |
58a3158a SM |
2418 | skip_pin: |
2419 | if (found) | |
2420 | break; | |
14829422 JB |
2421 | } |
2422 | ||
58a3158a | 2423 | return ret; |
14829422 JB |
2424 | } |
2425 | ||
58a3158a SM |
2426 | /* |
2427 | * In local heartbeat mode, region_uuid passed matches the dlm domain name. | |
2428 | * In global heartbeat mode, region_uuid passed is NULL. | |
2429 | * | |
2430 | * In local, we only unpin the matching region. In global we unpin all the | |
2431 | * active regions. | |
2432 | */ | |
2433 | static void o2hb_region_unpin(const char *region_uuid) | |
14829422 | 2434 | { |
14829422 | 2435 | struct o2hb_region *reg; |
58a3158a SM |
2436 | char *uuid; |
2437 | int found = 0; | |
14829422 | 2438 | |
58a3158a | 2439 | assert_spin_locked(&o2hb_live_lock); |
14829422 | 2440 | |
58a3158a SM |
2441 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2442 | uuid = config_item_name(®->hr_item); | |
2443 | if (region_uuid) { | |
2444 | if (strcmp(region_uuid, uuid)) | |
2445 | continue; | |
2446 | found = 1; | |
2447 | } | |
14829422 | 2448 | |
58a3158a SM |
2449 | if (reg->hr_item_pinned) { |
2450 | mlog(ML_CLUSTER, "Unpin region %s\n", uuid); | |
2451 | o2nm_undepend_item(®->hr_item); | |
2452 | reg->hr_item_pinned = 0; | |
2453 | } | |
2454 | if (found) | |
2455 | break; | |
2456 | } | |
2457 | } | |
16c6a4f2 | 2458 | |
58a3158a SM |
2459 | static int o2hb_region_inc_user(const char *region_uuid) |
2460 | { | |
2461 | int ret = 0; | |
14829422 | 2462 | |
58a3158a | 2463 | spin_lock(&o2hb_live_lock); |
16c6a4f2 | 2464 | |
58a3158a SM |
2465 | /* local heartbeat */ |
2466 | if (!o2hb_global_heartbeat_active()) { | |
2467 | ret = o2hb_region_pin(region_uuid); | |
2468 | goto unlock; | |
2469 | } | |
2470 | ||
2471 | /* | |
2472 | * if global heartbeat is active and this is the first dependent user, |
2473 | * pin all regions if quorum region count <= CUT_OFF | |
2474 | */ | |
2475 | o2hb_dependent_users++; | |
2476 | if (o2hb_dependent_users > 1) | |
2477 | goto unlock; | |
2478 | ||
2479 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | |
2480 | O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) | |
2481 | ret = o2hb_region_pin(NULL); | |
2482 | ||
2483 | unlock: | |
2484 | spin_unlock(&o2hb_live_lock); | |
14829422 JB |
2485 | return ret; |
2486 | } | |
2487 | ||
58a3158a | 2488 | void o2hb_region_dec_user(const char *region_uuid) |
14829422 | 2489 | { |
14829422 JB |
2490 | spin_lock(&o2hb_live_lock); |
2491 | ||
58a3158a SM |
2492 | /* local heartbeat */ |
2493 | if (!o2hb_global_heartbeat_active()) { | |
2494 | o2hb_region_unpin(region_uuid); | |
2495 | goto unlock; | |
2496 | } | |
14829422 | 2497 | |
58a3158a SM |
2498 | /* |
2499 | * if global heartbeat is active and there are no dependent users, |
2500 | * unpin all quorum regions | |
2501 | */ | |
2502 | o2hb_dependent_users--; | |
2503 | if (!o2hb_dependent_users) | |
2504 | o2hb_region_unpin(NULL); | |
14829422 | 2505 | |
58a3158a SM |
2506 | unlock: |
2507 | spin_unlock(&o2hb_live_lock); | |
14829422 JB |
2508 | } |
2509 | ||
2510 | int o2hb_register_callback(const char *region_uuid, | |
2511 | struct o2hb_callback_func *hc) | |
a7f6a5fb MF |
2512 | { |
2513 | struct o2hb_callback_func *tmp; | |
2514 | struct list_head *iter; | |
2515 | struct o2hb_callback *hbcall; | |
2516 | int ret; | |
2517 | ||
2518 | BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); | |
2519 | BUG_ON(!list_empty(&hc->hc_item)); | |
2520 | ||
2521 | hbcall = hbcall_from_type(hc->hc_type); | |
2522 | if (IS_ERR(hbcall)) { | |
2523 | ret = PTR_ERR(hbcall); | |
2524 | goto out; | |
2525 | } | |
2526 | ||
14829422 | 2527 | if (region_uuid) { |
58a3158a SM |
2528 | ret = o2hb_region_inc_user(region_uuid); |
2529 | if (ret) { | |
2530 | mlog_errno(ret); | |
14829422 | 2531 | goto out; |
58a3158a | 2532 | } |
14829422 JB |
2533 | } |
2534 | ||
a7f6a5fb MF |
2535 | down_write(&o2hb_callback_sem); |
2536 | ||
2537 | list_for_each(iter, &hbcall->list) { | |
2538 | tmp = list_entry(iter, struct o2hb_callback_func, hc_item); | |
2539 | if (hc->hc_priority < tmp->hc_priority) { | |
2540 | list_add_tail(&hc->hc_item, iter); | |
2541 | break; | |
2542 | } | |
2543 | } | |
2544 | if (list_empty(&hc->hc_item)) | |
2545 | list_add_tail(&hc->hc_item, &hbcall->list); | |
2546 | ||
2547 | up_write(&o2hb_callback_sem); | |
2548 | ret = 0; | |
2549 | out: | |
58a3158a | 2550 | mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n", |
a7f6a5fb MF |
2551 | ret, __builtin_return_address(0), hc); |
2552 | return ret; | |
2553 | } | |
2554 | EXPORT_SYMBOL_GPL(o2hb_register_callback); | |
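
/*
 * A minimal registration sketch (hypothetical caller; the callback name
 * and priority value are made up for illustration). The o2hb_cb_func
 * receives the o2nm node, its node number and the caller's private data:
 *
 *	static void my_node_down(struct o2nm_node *node, int node_num,
 *				 void *data)
 *	{
 *		printk(KERN_INFO "node %d went down\n", node_num);
 *	}
 *
 *	static struct o2hb_callback_func my_down_cb;
 *
 *	o2hb_setup_callback(&my_down_cb, O2HB_NODE_DOWN_CB, my_node_down,
 *			    NULL, 0);
 *	ret = o2hb_register_callback(region_uuid, &my_down_cb);
 *	...
 *	o2hb_unregister_callback(region_uuid, &my_down_cb);
 */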
2555 | ||
14829422 JB |
2556 | void o2hb_unregister_callback(const char *region_uuid, |
2557 | struct o2hb_callback_func *hc) | |
a7f6a5fb MF |
2558 | { |
2559 | BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); | |
2560 | ||
58a3158a | 2561 | mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n", |
a7f6a5fb MF |
2562 | __builtin_return_address(0), hc); |
2563 | ||
14829422 | 2564 | /* XXX Can this happen _with_ a region reference? */ |
a7f6a5fb | 2565 | if (list_empty(&hc->hc_item)) |
c24f72cc | 2566 | return; |
a7f6a5fb | 2567 | |
14829422 | 2568 | if (region_uuid) |
58a3158a | 2569 | o2hb_region_dec_user(region_uuid); |
14829422 | 2570 | |
a7f6a5fb MF |
2571 | down_write(&o2hb_callback_sem); |
2572 | ||
2573 | list_del_init(&hc->hc_item); | |
2574 | ||
2575 | up_write(&o2hb_callback_sem); | |
a7f6a5fb MF |
2576 | } |
2577 | EXPORT_SYMBOL_GPL(o2hb_unregister_callback); | |
2578 | ||
2579 | int o2hb_check_node_heartbeating(u8 node_num) | |
2580 | { | |
2581 | unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | |
2582 | ||
2583 | o2hb_fill_node_map(testing_map, sizeof(testing_map)); | |
2584 | if (!test_bit(node_num, testing_map)) { | |
2585 | mlog(ML_HEARTBEAT, | |
2586 | "node (%u) does not have heartbeating enabled.\n", | |
2587 | node_num); | |
2588 | return 0; | |
2589 | } | |
2590 | ||
2591 | return 1; | |
2592 | } | |
2593 | EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating); | |
2594 | ||
2595 | int o2hb_check_node_heartbeating_from_callback(u8 node_num) | |
2596 | { | |
2597 | unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | |
2598 | ||
2599 | o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map)); | |
2600 | if (!test_bit(node_num, testing_map)) { | |
2601 | mlog(ML_HEARTBEAT, | |
2602 | "node (%u) does not have heartbeating enabled.\n", | |
2603 | node_num); | |
2604 | return 0; | |
2605 | } | |
2606 | ||
2607 | return 1; | |
2608 | } | |
2609 | EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback); | |
2610 | ||
2611 | /* Makes sure our local node is configured with a node number, and is | |
2612 | * heartbeating. */ | |
2613 | int o2hb_check_local_node_heartbeating(void) | |
2614 | { | |
2615 | u8 node_num; | |
2616 | ||
2617 | /* if this node was set then we have networking */ | |
2618 | node_num = o2nm_this_node(); | |
2619 | if (node_num == O2NM_MAX_NODES) { | |
2620 | mlog(ML_HEARTBEAT, "this node has not been configured.\n"); | |
2621 | return 0; | |
2622 | } | |
2623 | ||
2624 | return o2hb_check_node_heartbeating(node_num); | |
2625 | } | |
2626 | EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating); | |
2627 | ||
2628 | /* | |
2629 | * this is just a hack until we get the plumbing which flips file systems |
2630 | * read-only and drops the hb ref instead of killing the node dead. |
2631 | */ | |
2632 | void o2hb_stop_all_regions(void) | |
2633 | { | |
2634 | struct o2hb_region *reg; | |
2635 | ||
2636 | mlog(ML_ERROR, "stopping heartbeat on all active regions.\n"); | |
2637 | ||
2638 | spin_lock(&o2hb_live_lock); | |
2639 | ||
2640 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) | |
2641 | reg->hr_unclean_stop = 1; | |
2642 | ||
2643 | spin_unlock(&o2hb_live_lock); | |
2644 | } | |
2645 | EXPORT_SYMBOL_GPL(o2hb_stop_all_regions); | |
b3c85c4c SM |
2646 | |
2647 | int o2hb_get_all_regions(char *region_uuids, u8 max_regions) | |
2648 | { | |
2649 | struct o2hb_region *reg; | |
2650 | int numregs = 0; | |
2651 | char *p; | |
2652 | ||
2653 | spin_lock(&o2hb_live_lock); | |
2654 | ||
2655 | p = region_uuids; | |
2656 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { | |
2657 | mlog(0, "Region: %s\n", config_item_name(®->hr_item)); | |
2658 | if (numregs < max_regions) { | |
2659 | memcpy(p, config_item_name(®->hr_item), | |
2660 | O2HB_MAX_REGION_NAME_LEN); | |
2661 | p += O2HB_MAX_REGION_NAME_LEN; | |
2662 | } | |
2663 | numregs++; | |
2664 | } | |
2665 | ||
2666 | spin_unlock(&o2hb_live_lock); | |
2667 | ||
2668 | return numregs; | |
2669 | } | |
2670 | EXPORT_SYMBOL_GPL(o2hb_get_all_regions); | |
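
/*
 * A minimal caller sketch (buffer sizing shown for illustration only):
 * the buffer must hold max_regions fixed-width names, each
 * O2HB_MAX_REGION_NAME_LEN bytes, and a return value larger than
 * max_regions means more regions exist than were copied.
 *
 *	char *uuids;
 *	int numregs;
 *
 *	uuids = kmalloc(O2NM_MAX_REGIONS * O2HB_MAX_REGION_NAME_LEN,
 *			GFP_KERNEL);
 *	if (uuids) {
 *		numregs = o2hb_get_all_regions(uuids, O2NM_MAX_REGIONS);
 *		...
 *		kfree(uuids);
 *	}
 */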
2671 | ||
2672 | int o2hb_global_heartbeat_active(void) | |
2673 | { | |
4d94aa1b | 2674 | return (o2hb_heartbeat_mode == O2HB_HEARTBEAT_GLOBAL); |
b3c85c4c SM |
2675 | } |
2676 | EXPORT_SYMBOL(o2hb_global_heartbeat_active); |