linux.git: drivers/md/persistent-data/dm-array.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-array.h"
9 #include "dm-space-map.h"
10 #include "dm-transaction-manager.h"
11
12 #include <linux/export.h>
13 #include <linux/device-mapper.h>
14
15 #define DM_MSG_PREFIX "array"
16
17 /*----------------------------------------------------------------*/
18
19 /*
20  * The array is implemented as a fully populated btree, which points to
21  * blocks that contain the packed values.  This is more space efficient
22  * than just using a btree since we don't store 1 key per value.
23  */
24 struct array_block {
25         __le32 csum;
26         __le32 max_entries;
27         __le32 nr_entries;
28         __le32 value_size;
29         __le64 blocknr; /* Block this node is supposed to live in. */
30 } __packed;
31
32 /*----------------------------------------------------------------*/
33
34 /*
35  * Validator methods.  As usual we calculate a checksum, and also write the
36  * block location into the header (paranoia about SSDs remapping areas by
37  * mistake).
38  */
39 #define CSUM_XOR 595846735
40
41 static void array_block_prepare_for_write(struct dm_block_validator *v,
42                                           struct dm_block *b,
43                                           size_t size_of_block)
44 {
45         struct array_block *bh_le = dm_block_data(b);
46
47         bh_le->blocknr = cpu_to_le64(dm_block_location(b));
48         bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
49                                                  size_of_block - sizeof(__le32),
50                                                  CSUM_XOR));
51 }
52
53 static int array_block_check(struct dm_block_validator *v,
54                              struct dm_block *b,
55                              size_t size_of_block)
56 {
57         struct array_block *bh_le = dm_block_data(b);
58         __le32 csum_disk;
59
60         if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
61                 DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
62                             (unsigned long long) le64_to_cpu(bh_le->blocknr),
63                             (unsigned long long) dm_block_location(b));
64                 return -ENOTBLK;
65         }
66
67         csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
68                                                size_of_block - sizeof(__le32),
69                                                CSUM_XOR));
70         if (csum_disk != bh_le->csum) {
71                 DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
72                             (unsigned) le32_to_cpu(csum_disk),
73                             (unsigned) le32_to_cpu(bh_le->csum));
74                 return -EILSEQ;
75         }
76
77         return 0;
78 }
79
80 static struct dm_block_validator array_validator = {
81         .name = "array",
82         .prepare_for_write = array_block_prepare_for_write,
83         .check = array_block_check
84 };
85
86 /*----------------------------------------------------------------*/
87
88 /*
89  * Functions for manipulating the array blocks.
90  */
91
92 /*
93  * Returns a pointer to a value within an array block.
94  *
95  * index - The index into _this_ specific block.
96  */
97 static void *element_at(struct dm_array_info *info, struct array_block *ab,
98                         unsigned index)
99 {
100         unsigned char *entry = (unsigned char *) (ab + 1);
101
102         entry += index * info->value_type.size;
103
104         return entry;
105 }
106
107 /*
108  * Utility function that calls one of the value_type methods on every value
109  * in an array block.
110  */
111 static void on_entries(struct dm_array_info *info, struct array_block *ab,
112                        void (*fn)(void *, const void *, unsigned))
113 {
114         unsigned nr_entries = le32_to_cpu(ab->nr_entries);
115         fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
116 }
117
118 /*
119  * Increment every value in an array block.
120  */
121 static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
122 {
123         struct dm_btree_value_type *vt = &info->value_type;
124
125         if (vt->inc)
126                 on_entries(info, ab, vt->inc);
127 }
128
129 /*
130  * Decrement every value in an array block.
131  */
132 static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
133 {
134         struct dm_btree_value_type *vt = &info->value_type;
135
136         if (vt->dec)
137                 on_entries(info, ab, vt->dec);
138 }
139
140 /*
141  * Each array block can hold this many values.
142  */
143 static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
144 {
145         return (size_of_block - sizeof(struct array_block)) / value_size;
146 }
147
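/*
 * For a sense of scale (sizes assumed for illustration): with a 4096 byte
 * metadata block and 8 byte (__le64) values, the 24 byte struct array_block
 * header leaves room for (4096 - 24) / 8 = 509 packed values per array block.
 */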
148 /*
149  * Allocate a new array block.  The caller will need to unlock block.
150  */
151 static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
152                         uint32_t max_entries,
153                         struct dm_block **block, struct array_block **ab)
154 {
155         int r;
156
157         r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
158         if (r)
159                 return r;
160
161         (*ab) = dm_block_data(*block);
162         (*ab)->max_entries = cpu_to_le32(max_entries);
163         (*ab)->nr_entries = cpu_to_le32(0);
164         (*ab)->value_size = cpu_to_le32(info->value_type.size);
165
166         return 0;
167 }
168
169 /*
170  * Pad an array block out with a particular value.  Every instance added
171  * causes an increment via the value_type's inc method.  new_nr must be at
172  * least the current number of entries.
173  */
174 static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
175                         const void *value, unsigned new_nr)
176 {
177         uint32_t nr_entries, delta, i;
178         struct dm_btree_value_type *vt = &info->value_type;
179
180         BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
181         BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
182
183         nr_entries = le32_to_cpu(ab->nr_entries);
184         delta = new_nr - nr_entries;
185         if (vt->inc)
186                 vt->inc(vt->context, value, delta);
187         for (i = nr_entries; i < new_nr; i++)
188                 memcpy(element_at(info, ab, i), value, vt->size);
189         ab->nr_entries = cpu_to_le32(new_nr);
190 }
191
192 /*
193  * Remove some entries from the back of an array block.  Every value
194  * removed will be decremented.  new_nr must be <= the current number of
195  * entries.
196  */
197 static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
198                         unsigned new_nr)
199 {
200         uint32_t nr_entries, delta;
201         struct dm_btree_value_type *vt = &info->value_type;
202
203         BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
204         BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
205
206         nr_entries = le32_to_cpu(ab->nr_entries);
207         delta = nr_entries - new_nr;
208         if (vt->dec)
209                 vt->dec(vt->context, element_at(info, ab, new_nr - 1), delta);
210         ab->nr_entries = cpu_to_le32(new_nr);
211 }
212
213 /*
214  * Read locks a block, and coerces it to an array block.  The caller must
215  * unlock 'block' when finished.
216  */
217 static int get_ablock(struct dm_array_info *info, dm_block_t b,
218                       struct dm_block **block, struct array_block **ab)
219 {
220         int r;
221
222         r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
223         if (r)
224                 return r;
225
226         *ab = dm_block_data(*block);
227         return 0;
228 }
229
230 /*
231  * Unlocks an array block.
232  */
233 static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
234 {
235         dm_tm_unlock(info->btree_info.tm, block);
236 }
237
238 /*----------------------------------------------------------------*/
239
240 /*
241  * Btree manipulation.
242  */
243
244 /*
245  * Looks up an array block in the btree, and then read locks it.
246  *
247  * index is the index of the array_block (ie. the array index
248  * / max_entries).
249  */
250 static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
251                          unsigned index, struct dm_block **block,
252                          struct array_block **ab)
253 {
254         int r;
255         uint64_t key = index;
256         __le64 block_le;
257
258         r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
259         if (r)
260                 return r;
261
262         return get_ablock(info, le64_to_cpu(block_le), block, ab);
263 }
264
265 /*
266  * Insert an array block into the btree.  The block is _not_ unlocked.
267  */
268 static int insert_ablock(struct dm_array_info *info, uint64_t index,
269                          struct dm_block *block, dm_block_t *root)
270 {
271         __le64 block_le = cpu_to_le64(dm_block_location(block));
272
273         __dm_bless_for_disk(block_le);
274         return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
275 }
276
277 /*----------------------------------------------------------------*/
278
279 static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
280                            struct dm_block **block, struct array_block **ab)
281 {
282         int inc;
283         int r = dm_tm_shadow_block(info->btree_info.tm, b,
284                                    &array_validator, block, &inc);
285         if (r)
286                 return r;
287
288         *ab = dm_block_data(*block);
289         if (inc)
290                 inc_ablock_entries(info, *ab);
291
292         return 0;
293 }
294
295 /*
296  * The shadow op will often be a noop.  Only insert if it really
297  * copied data.
298  */
299 static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
300                              struct dm_block *block, dm_block_t b,
301                              dm_block_t *root)
302 {
303         int r = 0;
304
305         if (dm_block_location(block) != b) {
306                 /*
307                  * dm_tm_shadow_block will have already decremented the old
308                  * block, but it is still referenced by the btree.  We
309                  * increment to stop the insert decrementing it below zero
310                  * when overwriting the old value.
311                  */
312                 dm_tm_inc(info->btree_info.tm, b);
313                 r = insert_ablock(info, index, block, root);
314         }
315
316         return r;
317 }
318
319 /*
320  * Looks up an array block in the btree.  Then shadows it, and updates the
321  * btree to point to this new shadow.  'root' is an input/output parameter
322  * for both the current root block, and the new one.
323  */
324 static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
325                          unsigned index, struct dm_block **block,
326                          struct array_block **ab)
327 {
328         int r;
329         uint64_t key = index;
330         dm_block_t b;
331         __le64 block_le;
332
333         r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
334         if (r)
335                 return r;
336         b = le64_to_cpu(block_le);
337
338         r = __shadow_ablock(info, b, block, ab);
339         if (r)
340                 return r;
341
342         return __reinsert_ablock(info, index, *block, b, root);
343 }
344
345 /*
346  * Allocate a new array block and fill it with some values.
347  */
348 static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
349                              uint32_t max_entries,
350                              unsigned block_index, uint32_t nr,
351                              const void *value, dm_block_t *root)
352 {
353         int r;
354         struct dm_block *block;
355         struct array_block *ab;
356
357         r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
358         if (r)
359                 return r;
360
361         fill_ablock(info, ab, value, nr);
362         r = insert_ablock(info, block_index, block, root);
363         unlock_ablock(info, block);
364
365         return r;
366 }
367
368 static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
369                                unsigned begin_block, unsigned end_block,
370                                unsigned max_entries, const void *value,
371                                dm_block_t *root)
372 {
373         int r = 0;
374
375         for (; !r && begin_block != end_block; begin_block++)
376                 r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);
377
378         return r;
379 }
380
381 /*
382  * There are a bunch of functions involved with resizing an array.  This
383  * structure holds information that is commonly needed by them.  It exists
384  * purely to reduce the parameter count.
385  */
386 struct resize {
387         /*
388          * Describes the array.
389          */
390         struct dm_array_info *info;
391
392         /*
393          * The current root of the array.  This gets updated.
394          */
395         dm_block_t root;
396
397         /*
398          * Metadata block size.  Used to calculate the nr entries in an
399          * array block.
400          */
401         size_t size_of_block;
402
403         /*
404          * Maximum nr entries in an array block.
405          */
406         unsigned max_entries;
407
408         /*
409          * nr of completely full blocks in the array.
410          *
411          * 'old' refers to before the resize, 'new' after.
412          */
413         unsigned old_nr_full_blocks, new_nr_full_blocks;
414
415         /*
416          * Number of entries in the final block.  0 iff only full blocks in
417          * the array.
418          */
419         unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
420
421         /*
422          * The default value used when growing the array.
423          */
424         const void *value;
425 };
426
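/*
 * For example (sizes assumed for illustration): growing an array from 1000
 * to 1500 entries with max_entries = 509 gives old_nr_full_blocks = 1,
 * old_nr_entries_in_last_block = 491, new_nr_full_blocks = 2 and
 * new_nr_entries_in_last_block = 482.
 */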
427 /*
428  * Removes a consecutive set of array blocks from the btree.  The values
429  * in the blocks are decremented as a side effect of the btree remove.
430  *
431  * begin_index - the index of the first array block to remove.
432  * end_index - the one-past-the-end value.  ie. this block is not removed.
433  */
434 static int drop_blocks(struct resize *resize, unsigned begin_index,
435                        unsigned end_index)
436 {
437         int r;
438
439         while (begin_index != end_index) {
440                 uint64_t key = begin_index++;
441                 r = dm_btree_remove(&resize->info->btree_info, resize->root,
442                                     &key, &resize->root);
443                 if (r)
444                         return r;
445         }
446
447         return 0;
448 }
449
450 /*
451  * Calculates how many blocks are needed for the array.
452  */
453 static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
454                                        unsigned nr_entries_in_last_block)
455 {
456         return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
457 }
458
459 /*
460  * Shrink an array.
461  */
462 static int shrink(struct resize *resize)
463 {
464         int r;
465         unsigned begin, end;
466         struct dm_block *block;
467         struct array_block *ab;
468
469         /*
470          * Lose some blocks from the back?
471          */
472         if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
473                 begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
474                                                resize->new_nr_entries_in_last_block);
475                 end = total_nr_blocks_needed(resize->old_nr_full_blocks,
476                                              resize->old_nr_entries_in_last_block);
477
478                 r = drop_blocks(resize, begin, end);
479                 if (r)
480                         return r;
481         }
482
483         /*
484          * Trim the new tail block
485          */
486         if (resize->new_nr_entries_in_last_block) {
487                 r = shadow_ablock(resize->info, &resize->root,
488                                   resize->new_nr_full_blocks, &block, &ab);
489                 if (r)
490                         return r;
491
492                 trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
493                 unlock_ablock(resize->info, block);
494         }
495
496         return 0;
497 }
498
499 /*
500  * Grow an array.
501  */
502 static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
503 {
504         int r;
505         struct dm_block *block;
506         struct array_block *ab;
507
508         r = shadow_ablock(resize->info, &resize->root,
509                           resize->old_nr_full_blocks, &block, &ab);
510         if (r)
511                 return r;
512
513         fill_ablock(resize->info, ab, resize->value, new_nr_entries);
514         unlock_ablock(resize->info, block);
515
516         return r;
517 }
518
519 static int grow_add_tail_block(struct resize *resize)
520 {
521         return insert_new_ablock(resize->info, resize->size_of_block,
522                                  resize->max_entries,
523                                  resize->new_nr_full_blocks,
524                                  resize->new_nr_entries_in_last_block,
525                                  resize->value, &resize->root);
526 }
527
528 static int grow_needs_more_blocks(struct resize *resize)
529 {
530         int r;
531         unsigned old_nr_blocks = resize->old_nr_full_blocks;
532
533         if (resize->old_nr_entries_in_last_block > 0) {
534                 old_nr_blocks++;
535
536                 r = grow_extend_tail_block(resize, resize->max_entries);
537                 if (r)
538                         return r;
539         }
540
541         r = insert_full_ablocks(resize->info, resize->size_of_block,
542                                 old_nr_blocks,
543                                 resize->new_nr_full_blocks,
544                                 resize->max_entries, resize->value,
545                                 &resize->root);
546         if (r)
547                 return r;
548
549         if (resize->new_nr_entries_in_last_block)
550                 r = grow_add_tail_block(resize);
551
552         return r;
553 }
554
555 static int grow(struct resize *resize)
556 {
557         if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
558                 return grow_needs_more_blocks(resize);
559
560         else if (resize->old_nr_entries_in_last_block)
561                 return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);
562
563         else
564                 return grow_add_tail_block(resize);
565 }
566
567 /*----------------------------------------------------------------*/
568
569 /*
570  * These are the value_type functions for the btree elements, which point
571  * to array blocks.
572  */
573 static void block_inc(void *context, const void *value, unsigned count)
574 {
575         const __le64 *block_le = value;
576         struct dm_array_info *info = context;
577         unsigned i;
578
579         for (i = 0; i < count; i++, block_le++)
580                 dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
581 }
582
583 static void __block_dec(void *context, const void *value)
584 {
585         int r;
586         uint64_t b;
587         __le64 block_le;
588         uint32_t ref_count;
589         struct dm_block *block;
590         struct array_block *ab;
591         struct dm_array_info *info = context;
592
593         memcpy(&block_le, value, sizeof(block_le));
594         b = le64_to_cpu(block_le);
595
596         r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
597         if (r) {
598                 DMERR_LIMIT("couldn't get reference count for block %llu",
599                             (unsigned long long) b);
600                 return;
601         }
602
603         if (ref_count == 1) {
604                 /*
605                  * We're about to drop the last reference to this ablock.
606                  * So we need to decrement the ref count of the contents.
607                  */
608                 r = get_ablock(info, b, &block, &ab);
609                 if (r) {
610                         DMERR_LIMIT("couldn't get array block %llu",
611                                     (unsigned long long) b);
612                         return;
613                 }
614
615                 dec_ablock_entries(info, ab);
616                 unlock_ablock(info, block);
617         }
618
619         dm_tm_dec(info->btree_info.tm, b);
620 }
621
622 static void block_dec(void *context, const void *value, unsigned count)
623 {
624         unsigned i;
625         for (i = 0; i < count; i++, value += sizeof(__le64))
626                 __block_dec(context, value);
627 }
628
629 static int block_equal(void *context, const void *value1, const void *value2)
630 {
631         return !memcmp(value1, value2, sizeof(__le64));
632 }
633
634 /*----------------------------------------------------------------*/
635
636 void dm_array_info_init(struct dm_array_info *info,
637                         struct dm_transaction_manager *tm,
638                         struct dm_btree_value_type *vt)
639 {
640         struct dm_btree_value_type *bvt = &info->btree_info.value_type;
641
642         memcpy(&info->value_type, vt, sizeof(info->value_type));
643         info->btree_info.tm = tm;
644         info->btree_info.levels = 1;
645
646         bvt->context = info;
647         bvt->size = sizeof(__le64);
648         bvt->inc = block_inc;
649         bvt->dec = block_dec;
650         bvt->equal = block_equal;
651 }
652 EXPORT_SYMBOL_GPL(dm_array_info_init);
653
654 int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
655 {
656         return dm_btree_empty(&info->btree_info, root);
657 }
658 EXPORT_SYMBOL_GPL(dm_array_empty);
659
660 static int array_resize(struct dm_array_info *info, dm_block_t root,
661                         uint32_t old_size, uint32_t new_size,
662                         const void *value, dm_block_t *new_root)
663 {
664         int r;
665         struct resize resize;
666
667         if (old_size == new_size) {
668                 *new_root = root;
669                 return 0;
670         }
671
672         resize.info = info;
673         resize.root = root;
674         resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
675         resize.max_entries = calc_max_entries(info->value_type.size,
676                                               resize.size_of_block);
677
678         resize.old_nr_full_blocks = old_size / resize.max_entries;
679         resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
680         resize.new_nr_full_blocks = new_size / resize.max_entries;
681         resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
682         resize.value = value;
683
684         r = ((new_size > old_size) ? grow : shrink)(&resize);
685         if (r)
686                 return r;
687
688         *new_root = resize.root;
689         return 0;
690 }
691
692 int dm_array_resize(struct dm_array_info *info, dm_block_t root,
693                     uint32_t old_size, uint32_t new_size,
694                     const void *value, dm_block_t *new_root)
695                     __dm_written_to_disk(value)
696 {
697         int r = array_resize(info, root, old_size, new_size, value, new_root);
698         __dm_unbless_for_disk(value);
699         return r;
700 }
701 EXPORT_SYMBOL_GPL(dm_array_resize);
702
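/*
 * A minimal setup sketch (hypothetical caller, error handling elided).  The
 * value type below stores plain __le64 values with no reference counting,
 * 'tm' is assumed to be an existing transaction manager, and the sizes are
 * illustrative only:
 *
 *	struct dm_array_info info;
 *	struct dm_btree_value_type vt = {
 *		.context = NULL,
 *		.size = sizeof(__le64),
 *		.inc = NULL,
 *		.dec = NULL,
 *		.equal = NULL,
 *	};
 *	dm_block_t root;
 *	__le64 default_value = cpu_to_le64(0);
 *	int r;
 *
 *	dm_array_info_init(&info, tm, &vt);
 *	r = dm_array_empty(&info, &root);
 *	if (!r) {
 *		__dm_bless_for_disk(&default_value);
 *		r = dm_array_resize(&info, root, 0, 100, &default_value, &root);
 *	}
 */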
703 static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
704                                        value_fn fn, void *context, unsigned base, unsigned new_nr)
705 {
706         int r;
707         unsigned i;
708         struct dm_btree_value_type *vt = &info->value_type;
709
710         BUG_ON(le32_to_cpu(ab->nr_entries));
711         BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
712
713         for (i = 0; i < new_nr; i++) {
714                 r = fn(base + i, element_at(info, ab, i), context);
715                 if (r)
716                         return r;
717
718                 if (vt->inc)
719                         vt->inc(vt->context, element_at(info, ab, i), 1);
720         }
721
722         ab->nr_entries = cpu_to_le32(new_nr);
723         return 0;
724 }
725
726 int dm_array_new(struct dm_array_info *info, dm_block_t *root,
727                  uint32_t size, value_fn fn, void *context)
728 {
729         int r;
730         struct dm_block *block;
731         struct array_block *ab;
732         unsigned block_index, end_block, size_of_block, max_entries;
733
734         r = dm_array_empty(info, root);
735         if (r)
736                 return r;
737
738         size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
739         max_entries = calc_max_entries(info->value_type.size, size_of_block);
740         end_block = dm_div_up(size, max_entries);
741
742         for (block_index = 0; block_index != end_block; block_index++) {
743                 r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
744                 if (r)
745                         break;
746
747                 r = populate_ablock_with_values(info, ab, fn, context,
748                                                 block_index * max_entries,
749                                                 min(max_entries, size));
750                 if (r) {
751                         unlock_ablock(info, block);
752                         break;
753                 }
754
755                 r = insert_ablock(info, block_index, block, root);
756                 unlock_ablock(info, block);
757                 if (r)
758                         break;
759
760                 size -= max_entries;
761         }
762
763         return r;
764 }
765 EXPORT_SYMBOL_GPL(dm_array_new);
766
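/*
 * A bulk-initialisation sketch using dm_array_new() (hypothetical caller):
 * the value_fn callback writes the little-endian value for each index
 * directly into the array block.
 *
 *	static int init_value(uint32_t index, void *value_le, void *context)
 *	{
 *		__le64 v = cpu_to_le64(index);
 *
 *		memcpy(value_le, &v, sizeof(v));
 *		return 0;
 *	}
 *
 *	r = dm_array_new(&info, &root, 100, init_value, NULL);
 */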
767 int dm_array_del(struct dm_array_info *info, dm_block_t root)
768 {
769         return dm_btree_del(&info->btree_info, root);
770 }
771 EXPORT_SYMBOL_GPL(dm_array_del);
772
773 int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
774                        uint32_t index, void *value_le)
775 {
776         int r;
777         struct dm_block *block;
778         struct array_block *ab;
779         size_t size_of_block;
780         unsigned entry, max_entries;
781
782         size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
783         max_entries = calc_max_entries(info->value_type.size, size_of_block);
784
785         r = lookup_ablock(info, root, index / max_entries, &block, &ab);
786         if (r)
787                 return r;
788
789         entry = index % max_entries;
790         if (entry >= le32_to_cpu(ab->nr_entries))
791                 r = -ENODATA;
792         else
793                 memcpy(value_le, element_at(info, ab, entry),
794                        info->value_type.size);
795
796         unlock_ablock(info, block);
797         return r;
798 }
799 EXPORT_SYMBOL_GPL(dm_array_get_value);
800
801 static int array_set_value(struct dm_array_info *info, dm_block_t root,
802                            uint32_t index, const void *value, dm_block_t *new_root)
803 {
804         int r;
805         struct dm_block *block;
806         struct array_block *ab;
807         size_t size_of_block;
808         unsigned max_entries;
809         unsigned entry;
810         void *old_value;
811         struct dm_btree_value_type *vt = &info->value_type;
812
813         size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
814         max_entries = calc_max_entries(info->value_type.size, size_of_block);
815
816         r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
817         if (r)
818                 return r;
819         *new_root = root;
820
821         entry = index % max_entries;
822         if (entry >= le32_to_cpu(ab->nr_entries)) {
823                 r = -ENODATA;
824                 goto out;
825         }
826
827         old_value = element_at(info, ab, entry);
828         if (vt->dec &&
829             (!vt->equal || !vt->equal(vt->context, old_value, value))) {
830                 vt->dec(vt->context, old_value, 1);
831                 if (vt->inc)
832                         vt->inc(vt->context, value, 1);
833         }
834
835         memcpy(old_value, value, info->value_type.size);
836
837 out:
838         unlock_ablock(info, block);
839         return r;
840 }
841
842 int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
843                  uint32_t index, const void *value, dm_block_t *new_root)
844                  __dm_written_to_disk(value)
845 {
846         int r;
847
848         r = array_set_value(info, root, index, value, new_root);
849         __dm_unbless_for_disk(value);
850         return r;
851 }
852 EXPORT_SYMBOL_GPL(dm_array_set_value);
853
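/*
 * A read-modify-write sketch (hypothetical caller, error handling elided),
 * again assuming plain __le64 values:
 *
 *	__le64 value_le;
 *	int r;
 *
 *	r = dm_array_get_value(&info, root, 42, &value_le);
 *	if (!r) {
 *		value_le = cpu_to_le64(le64_to_cpu(value_le) + 1);
 *		__dm_bless_for_disk(&value_le);
 *		r = dm_array_set_value(&info, root, 42, &value_le, &root);
 *	}
 */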
854 struct walk_info {
855         struct dm_array_info *info;
856         int (*fn)(void *context, uint64_t key, void *leaf);
857         void *context;
858 };
859
860 static int walk_ablock(void *context, uint64_t *keys, void *leaf)
861 {
862         struct walk_info *wi = context;
863
864         int r;
865         unsigned i;
866         __le64 block_le;
867         unsigned nr_entries, max_entries;
868         struct dm_block *block;
869         struct array_block *ab;
870
871         memcpy(&block_le, leaf, sizeof(block_le));
872         r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
873         if (r)
874                 return r;
875
876         max_entries = le32_to_cpu(ab->max_entries);
877         nr_entries = le32_to_cpu(ab->nr_entries);
878         for (i = 0; i < nr_entries; i++) {
879                 r = wi->fn(wi->context, keys[0] * max_entries + i,
880                            element_at(wi->info, ab, i));
881
882                 if (r)
883                         break;
884         }
885
886         unlock_ablock(wi->info, block);
887         return r;
888 }
889
890 int dm_array_walk(struct dm_array_info *info, dm_block_t root,
891                   int (*fn)(void *, uint64_t key, void *leaf),
892                   void *context)
893 {
894         struct walk_info wi;
895
896         wi.info = info;
897         wi.fn = fn;
898         wi.context = context;
899
900         return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
901 }
902 EXPORT_SYMBOL_GPL(dm_array_walk);
903
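/*
 * A walk sketch (hypothetical caller): 'leaf' points at the packed value
 * inside the array block, so it is copied out before use.
 *
 *	static int dump_one(void *context, uint64_t key, void *leaf)
 *	{
 *		__le64 value_le;
 *
 *		memcpy(&value_le, leaf, sizeof(value_le));
 *		DMINFO("index %llu = %llu", (unsigned long long) key,
 *		       (unsigned long long) le64_to_cpu(value_le));
 *		return 0;
 *	}
 *
 *	r = dm_array_walk(&info, root, dump_one, NULL);
 */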
904 /*----------------------------------------------------------------*/
905
906 static int load_ablock(struct dm_array_cursor *c)
907 {
908         int r;
909         __le64 value_le;
910         uint64_t key;
911
912         if (c->block)
913                 unlock_ablock(c->info, c->block);
914
915         c->block = NULL;
916         c->ab = NULL;
917         c->index = 0;
918
919         r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
920         if (r) {
921                 DMERR("dm_btree_cursor_get_value failed");
922                 dm_btree_cursor_end(&c->cursor);
923
924         } else {
925                 r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
926                 if (r) {
927                         DMERR("get_ablock failed");
928                         dm_btree_cursor_end(&c->cursor);
929                 }
930         }
931
932         return r;
933 }
934
935 int dm_array_cursor_begin(struct dm_array_info *info, dm_block_t root,
936                           struct dm_array_cursor *c)
937 {
938         int r;
939
940         memset(c, 0, sizeof(*c));
941         c->info = info;
942         r = dm_btree_cursor_begin(&info->btree_info, root, true, &c->cursor);
943         if (r) {
944                 DMERR("couldn't create btree cursor");
945                 return r;
946         }
947
948         return load_ablock(c);
949 }
950 EXPORT_SYMBOL_GPL(dm_array_cursor_begin);
951
952 void dm_array_cursor_end(struct dm_array_cursor *c)
953 {
954         if (c->block) {
955                 unlock_ablock(c->info, c->block);
956                 dm_btree_cursor_end(&c->cursor);
957         }
958 }
959 EXPORT_SYMBOL_GPL(dm_array_cursor_end);
960
961 int dm_array_cursor_next(struct dm_array_cursor *c)
962 {
963         int r;
964
965         if (!c->block)
966                 return -ENODATA;
967
968         c->index++;
969
970         if (c->index >= le32_to_cpu(c->ab->nr_entries)) {
971                 r = dm_btree_cursor_next(&c->cursor);
972                 if (r)
973                         return r;
974
975                 r = load_ablock(c);
976                 if (r)
977                         return r;
978         }
979
980         return 0;
981 }
982 EXPORT_SYMBOL_GPL(dm_array_cursor_next);
983
984 int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count)
985 {
986         int r;
987
988         do {
989                 uint32_t remaining = le32_to_cpu(c->ab->nr_entries) - c->index;
990
991                 if (count < remaining) {
992                         c->index += count;
993                         return 0;
994                 }
995
996                 count -= remaining;
997                 r = dm_array_cursor_next(c);
998
999         } while (!r);
1000
1001         return r;
1002 }
1003 EXPORT_SYMBOL_GPL(dm_array_cursor_skip);
1004
1005 void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le)
1006 {
1007         *value_le = element_at(c->info, c->ab, c->index);
1008 }
1009 EXPORT_SYMBOL_GPL(dm_array_cursor_get_value);
1010
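/*
 * A cursor iteration sketch (hypothetical caller, error handling elided),
 * assuming the caller already knows the number of entries (nr_entries):
 *
 *	struct dm_array_cursor cursor;
 *	__le64 *value_le;
 *	uint32_t i;
 *	int r;
 *
 *	r = dm_array_cursor_begin(&info, root, &cursor);
 *	for (i = 0; !r && i < nr_entries; i++) {
 *		dm_array_cursor_get_value(&cursor, (void **) &value_le);
 *		(value_le points into the array block and is only valid
 *		 until the cursor moves on)
 *		if (i + 1 < nr_entries)
 *			r = dm_array_cursor_next(&cursor);
 *	}
 *	dm_array_cursor_end(&cursor);
 */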
1011 /*----------------------------------------------------------------*/