// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2023 Arm Ltd

#include <kunit/test.h>
10 #define BLOCK_TEST_SIZE 12
12 static const struct regmap_config test_regmap_config = {
13 .max_register = BLOCK_TEST_SIZE,
15 .val_bits = sizeof(unsigned int) * 8,
18 struct regcache_types {
19 enum regcache_type type;
23 static void case_to_desc(const struct regcache_types *t, char *desc)
25 strcpy(desc, t->name);
28 static const struct regcache_types regcache_types_list[] = {
29 { REGCACHE_NONE, "none" },
30 { REGCACHE_FLAT, "flat" },
31 { REGCACHE_RBTREE, "rbtree" },
32 { REGCACHE_MAPLE, "maple" },
35 KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
37 static const struct regcache_types real_cache_types_list[] = {
38 { REGCACHE_FLAT, "flat" },
39 { REGCACHE_RBTREE, "rbtree" },
40 { REGCACHE_MAPLE, "maple" },
43 KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
45 static const struct regcache_types sparse_cache_types_list[] = {
46 { REGCACHE_RBTREE, "rbtree" },
47 { REGCACHE_MAPLE, "maple" },
50 KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
52 static struct regmap *gen_regmap(struct regmap_config *config,
53 struct regmap_ram_data **data)
57 size_t size = (config->max_register + 1) * sizeof(unsigned int);
59 struct reg_default *defaults;
61 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
62 config->cache_type == REGCACHE_MAPLE;
64 buf = kmalloc(size, GFP_KERNEL);
66 return ERR_PTR(-ENOMEM);
68 get_random_bytes(buf, size);
70 *data = kzalloc(sizeof(**data), GFP_KERNEL);
72 return ERR_PTR(-ENOMEM);
75 if (config->num_reg_defaults) {
76 defaults = kcalloc(config->num_reg_defaults,
77 sizeof(struct reg_default),
80 return ERR_PTR(-ENOMEM);
81 config->reg_defaults = defaults;
83 for (i = 0; i < config->num_reg_defaults; i++) {
84 defaults[i].reg = i * config->reg_stride;
85 defaults[i].def = buf[i * config->reg_stride];
89 ret = regmap_init_ram(config, *data);
98 static bool reg_5_false(struct device *context, unsigned int reg)
103 static void basic_read_write(struct kunit *test)
105 struct regcache_types *t = (struct regcache_types *)test->param_value;
107 struct regmap_config config;
108 struct regmap_ram_data *data;
109 unsigned int val, rval;
111 config = test_regmap_config;
112 config.cache_type = t->type;
114 map = gen_regmap(&config, &data);
115 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
119 get_random_bytes(&val, sizeof(val));
121 /* If we write a value to a register we can read it back */
122 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
123 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
124 KUNIT_EXPECT_EQ(test, val, rval);
126 /* If using a cache the cache satisfied the read */
127 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
132 static void bulk_write(struct kunit *test)
134 struct regcache_types *t = (struct regcache_types *)test->param_value;
136 struct regmap_config config;
137 struct regmap_ram_data *data;
138 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
141 config = test_regmap_config;
142 config.cache_type = t->type;
144 map = gen_regmap(&config, &data);
145 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
149 get_random_bytes(&val, sizeof(val));
152 * Data written via the bulk API can be read back with single
155 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
157 for (i = 0; i < BLOCK_TEST_SIZE; i++)
158 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
160 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
162 /* If using a cache the cache satisfied the read */
163 for (i = 0; i < BLOCK_TEST_SIZE; i++)
164 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
169 static void bulk_read(struct kunit *test)
171 struct regcache_types *t = (struct regcache_types *)test->param_value;
173 struct regmap_config config;
174 struct regmap_ram_data *data;
175 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
178 config = test_regmap_config;
179 config.cache_type = t->type;
181 map = gen_regmap(&config, &data);
182 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
186 get_random_bytes(&val, sizeof(val));
188 /* Data written as single writes can be read via the bulk API */
189 for (i = 0; i < BLOCK_TEST_SIZE; i++)
190 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
191 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
193 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
195 /* If using a cache the cache satisfied the read */
196 for (i = 0; i < BLOCK_TEST_SIZE; i++)
197 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
202 static void write_readonly(struct kunit *test)
204 struct regcache_types *t = (struct regcache_types *)test->param_value;
206 struct regmap_config config;
207 struct regmap_ram_data *data;
211 config = test_regmap_config;
212 config.cache_type = t->type;
213 config.num_reg_defaults = BLOCK_TEST_SIZE;
214 config.writeable_reg = reg_5_false;
216 map = gen_regmap(&config, &data);
217 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
221 get_random_bytes(&val, sizeof(val));
223 for (i = 0; i < BLOCK_TEST_SIZE; i++)
224 data->written[i] = false;
226 /* Change the value of all registers, readonly should fail */
227 for (i = 0; i < BLOCK_TEST_SIZE; i++)
228 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
230 /* Did that match what we see on the device? */
231 for (i = 0; i < BLOCK_TEST_SIZE; i++)
232 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
237 static void read_writeonly(struct kunit *test)
239 struct regcache_types *t = (struct regcache_types *)test->param_value;
241 struct regmap_config config;
242 struct regmap_ram_data *data;
246 config = test_regmap_config;
247 config.cache_type = t->type;
248 config.readable_reg = reg_5_false;
250 map = gen_regmap(&config, &data);
251 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
255 for (i = 0; i < BLOCK_TEST_SIZE; i++)
256 data->read[i] = false;
259 * Try to read all the registers, the writeonly one should
260 * fail if we aren't using the flat cache.
262 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
263 if (t->type != REGCACHE_FLAT) {
264 KUNIT_EXPECT_EQ(test, i != 5,
265 regmap_read(map, i, &val) == 0);
267 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
271 /* Did we trigger a hardware access? */
272 KUNIT_EXPECT_FALSE(test, data->read[5]);
277 static void reg_defaults(struct kunit *test)
279 struct regcache_types *t = (struct regcache_types *)test->param_value;
281 struct regmap_config config;
282 struct regmap_ram_data *data;
283 unsigned int rval[BLOCK_TEST_SIZE];
286 config = test_regmap_config;
287 config.cache_type = t->type;
288 config.num_reg_defaults = BLOCK_TEST_SIZE;
290 map = gen_regmap(&config, &data);
291 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
295 /* Read back the expected default data */
296 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
298 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
300 /* The data should have been read from cache if there was one */
301 for (i = 0; i < BLOCK_TEST_SIZE; i++)
302 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
305 static void reg_defaults_read_dev(struct kunit *test)
307 struct regcache_types *t = (struct regcache_types *)test->param_value;
309 struct regmap_config config;
310 struct regmap_ram_data *data;
311 unsigned int rval[BLOCK_TEST_SIZE];
314 config = test_regmap_config;
315 config.cache_type = t->type;
316 config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
318 map = gen_regmap(&config, &data);
319 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
323 /* We should have read the cache defaults back from the map */
324 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
325 KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
326 data->read[i] = false;
329 /* Read back the expected default data */
330 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
332 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
334 /* The data should have been read from cache if there was one */
335 for (i = 0; i < BLOCK_TEST_SIZE; i++)
336 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
339 static void register_patch(struct kunit *test)
341 struct regcache_types *t = (struct regcache_types *)test->param_value;
343 struct regmap_config config;
344 struct regmap_ram_data *data;
345 struct reg_sequence patch[2];
346 unsigned int rval[BLOCK_TEST_SIZE];
349 /* We need defaults so readback works */
350 config = test_regmap_config;
351 config.cache_type = t->type;
352 config.num_reg_defaults = BLOCK_TEST_SIZE;
354 map = gen_regmap(&config, &data);
355 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
359 /* Stash the original values */
360 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
363 /* Patch a couple of values */
365 patch[0].def = rval[2] + 1;
366 patch[0].delay_us = 0;
368 patch[1].def = rval[5] + 1;
369 patch[1].delay_us = 0;
370 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
373 /* Only the patched registers are written */
374 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
378 KUNIT_EXPECT_TRUE(test, data->written[i]);
379 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
382 KUNIT_EXPECT_FALSE(test, data->written[i]);
383 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
391 static void stride(struct kunit *test)
393 struct regcache_types *t = (struct regcache_types *)test->param_value;
395 struct regmap_config config;
396 struct regmap_ram_data *data;
400 config = test_regmap_config;
401 config.cache_type = t->type;
402 config.reg_stride = 2;
403 config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
405 map = gen_regmap(&config, &data);
406 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
410 /* Only even registers can be accessed, try both read and write */
411 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
412 data->read[i] = false;
413 data->written[i] = false;
416 KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
417 KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
418 KUNIT_EXPECT_FALSE(test, data->read[i]);
419 KUNIT_EXPECT_FALSE(test, data->written[i]);
421 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
422 KUNIT_EXPECT_EQ(test, data->vals[i], rval);
423 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
426 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
427 KUNIT_EXPECT_TRUE(test, data->written[i]);
434 static struct regmap_range_cfg test_range = {
436 .selector_mask = 0xff,
445 static bool test_range_volatile(struct device *dev, unsigned int reg)
447 if (reg >= test_range.window_start &&
448 reg <= test_range.selector_reg + test_range.window_len)
451 if (reg >= test_range.range_min && reg <= test_range.range_max)
457 static void basic_ranges(struct kunit *test)
459 struct regcache_types *t = (struct regcache_types *)test->param_value;
461 struct regmap_config config;
462 struct regmap_ram_data *data;
466 config = test_regmap_config;
467 config.cache_type = t->type;
468 config.volatile_reg = test_range_volatile;
469 config.ranges = &test_range;
470 config.num_ranges = 1;
471 config.max_register = test_range.range_max;
473 map = gen_regmap(&config, &data);
474 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
478 for (i = test_range.range_min; i < test_range.range_max; i++) {
479 data->read[i] = false;
480 data->written[i] = false;
483 /* Reset the page to a non-zero value to trigger a change */
484 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
485 test_range.range_max));
487 /* Check we set the page and use the window for writes */
488 data->written[test_range.selector_reg] = false;
489 data->written[test_range.window_start] = false;
490 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
491 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
492 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
494 data->written[test_range.selector_reg] = false;
495 data->written[test_range.window_start] = false;
496 KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
497 test_range.range_min +
498 test_range.window_len,
500 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
501 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
504 data->written[test_range.selector_reg] = false;
505 data->read[test_range.window_start] = false;
506 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
507 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
508 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
510 data->written[test_range.selector_reg] = false;
511 data->read[test_range.window_start] = false;
512 KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
513 test_range.range_min +
514 test_range.window_len,
516 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
517 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
519 /* No physical access triggered in the virtual range */
520 for (i = test_range.range_min; i < test_range.range_max; i++) {
521 KUNIT_EXPECT_FALSE(test, data->read[i]);
522 KUNIT_EXPECT_FALSE(test, data->written[i]);
528 /* Try to stress dynamic creation of cache data structures */
529 static void stress_insert(struct kunit *test)
531 struct regcache_types *t = (struct regcache_types *)test->param_value;
533 struct regmap_config config;
534 struct regmap_ram_data *data;
535 unsigned int rval, *vals;
539 config = test_regmap_config;
540 config.cache_type = t->type;
541 config.max_register = 300;
543 map = gen_regmap(&config, &data);
544 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
548 vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
550 KUNIT_ASSERT_FALSE(test, vals == NULL);
551 buf_sz = sizeof(unsigned long) * config.max_register;
553 get_random_bytes(vals, buf_sz);
555 /* Write data into the map/cache in ever decreasing strides */
556 for (i = 0; i < config.max_register; i += 100)
557 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
558 for (i = 0; i < config.max_register; i += 50)
559 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
560 for (i = 0; i < config.max_register; i += 25)
561 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
562 for (i = 0; i < config.max_register; i += 10)
563 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
564 for (i = 0; i < config.max_register; i += 5)
565 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
566 for (i = 0; i < config.max_register; i += 3)
567 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
568 for (i = 0; i < config.max_register; i += 2)
569 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
570 for (i = 0; i < config.max_register; i++)
571 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
573 /* Do reads from the cache (if there is one) match? */
574 for (i = 0; i < config.max_register; i ++) {
575 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
576 KUNIT_EXPECT_EQ(test, rval, vals[i]);
577 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
583 static void cache_bypass(struct kunit *test)
585 struct regcache_types *t = (struct regcache_types *)test->param_value;
587 struct regmap_config config;
588 struct regmap_ram_data *data;
589 unsigned int val, rval;
591 config = test_regmap_config;
592 config.cache_type = t->type;
594 map = gen_regmap(&config, &data);
595 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
599 get_random_bytes(&val, sizeof(val));
601 /* Ensure the cache has a value in it */
602 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
604 /* Bypass then write a different value */
605 regcache_cache_bypass(map, true);
606 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
608 /* Read the bypassed value */
609 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
610 KUNIT_EXPECT_EQ(test, val + 1, rval);
611 KUNIT_EXPECT_EQ(test, data->vals[0], rval);
613 /* Disable bypass, the cache should still return the original value */
614 regcache_cache_bypass(map, false);
615 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
616 KUNIT_EXPECT_EQ(test, val, rval);
621 static void cache_sync(struct kunit *test)
623 struct regcache_types *t = (struct regcache_types *)test->param_value;
625 struct regmap_config config;
626 struct regmap_ram_data *data;
627 unsigned int val[BLOCK_TEST_SIZE];
630 config = test_regmap_config;
631 config.cache_type = t->type;
633 map = gen_regmap(&config, &data);
634 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
638 get_random_bytes(&val, sizeof(val));
640 /* Put some data into the cache */
641 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
643 for (i = 0; i < BLOCK_TEST_SIZE; i++)
644 data->written[i] = false;
646 /* Trash the data on the device itself then resync */
647 regcache_mark_dirty(map);
648 memset(data->vals, 0, sizeof(val));
649 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
651 /* Did we just write the correct data out? */
652 KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
653 for (i = 0; i < BLOCK_TEST_SIZE; i++)
654 KUNIT_EXPECT_EQ(test, true, data->written[i]);
659 static void cache_sync_defaults(struct kunit *test)
661 struct regcache_types *t = (struct regcache_types *)test->param_value;
663 struct regmap_config config;
664 struct regmap_ram_data *data;
668 config = test_regmap_config;
669 config.cache_type = t->type;
670 config.num_reg_defaults = BLOCK_TEST_SIZE;
672 map = gen_regmap(&config, &data);
673 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
677 get_random_bytes(&val, sizeof(val));
679 /* Change the value of one register */
680 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
683 regcache_mark_dirty(map);
684 for (i = 0; i < BLOCK_TEST_SIZE; i++)
685 data->written[i] = false;
686 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
688 /* Did we just sync the one register we touched? */
689 for (i = 0; i < BLOCK_TEST_SIZE; i++)
690 KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
695 static void cache_sync_readonly(struct kunit *test)
697 struct regcache_types *t = (struct regcache_types *)test->param_value;
699 struct regmap_config config;
700 struct regmap_ram_data *data;
704 config = test_regmap_config;
705 config.cache_type = t->type;
706 config.writeable_reg = reg_5_false;
708 map = gen_regmap(&config, &data);
709 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
713 /* Read all registers to fill the cache */
714 for (i = 0; i < BLOCK_TEST_SIZE; i++)
715 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
717 /* Change the value of all registers, readonly should fail */
718 get_random_bytes(&val, sizeof(val));
719 regcache_cache_only(map, true);
720 for (i = 0; i < BLOCK_TEST_SIZE; i++)
721 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
722 regcache_cache_only(map, false);
725 for (i = 0; i < BLOCK_TEST_SIZE; i++)
726 data->written[i] = false;
727 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
729 /* Did that match what we see on the device? */
730 for (i = 0; i < BLOCK_TEST_SIZE; i++)
731 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
736 static void cache_sync_patch(struct kunit *test)
738 struct regcache_types *t = (struct regcache_types *)test->param_value;
740 struct regmap_config config;
741 struct regmap_ram_data *data;
742 struct reg_sequence patch[2];
743 unsigned int rval[BLOCK_TEST_SIZE], val;
746 /* We need defaults so readback works */
747 config = test_regmap_config;
748 config.cache_type = t->type;
749 config.num_reg_defaults = BLOCK_TEST_SIZE;
751 map = gen_regmap(&config, &data);
752 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
756 /* Stash the original values */
757 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
760 /* Patch a couple of values */
762 patch[0].def = rval[2] + 1;
763 patch[0].delay_us = 0;
765 patch[1].def = rval[5] + 1;
766 patch[1].delay_us = 0;
767 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
771 regcache_mark_dirty(map);
772 for (i = 0; i < BLOCK_TEST_SIZE; i++)
773 data->written[i] = false;
774 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
776 /* The patch should be on the device but not in the cache */
777 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
778 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
779 KUNIT_EXPECT_EQ(test, val, rval[i]);
784 KUNIT_EXPECT_EQ(test, true, data->written[i]);
785 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
788 KUNIT_EXPECT_EQ(test, false, data->written[i]);
789 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
797 static void cache_drop(struct kunit *test)
799 struct regcache_types *t = (struct regcache_types *)test->param_value;
801 struct regmap_config config;
802 struct regmap_ram_data *data;
803 unsigned int rval[BLOCK_TEST_SIZE];
806 config = test_regmap_config;
807 config.cache_type = t->type;
808 config.num_reg_defaults = BLOCK_TEST_SIZE;
810 map = gen_regmap(&config, &data);
811 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
815 /* Ensure the data is read from the cache */
816 for (i = 0; i < BLOCK_TEST_SIZE; i++)
817 data->read[i] = false;
818 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
820 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
821 KUNIT_EXPECT_FALSE(test, data->read[i]);
822 data->read[i] = false;
824 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
826 /* Drop some registers */
827 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
829 /* Reread and check only the dropped registers hit the device. */
830 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
832 for (i = 0; i < BLOCK_TEST_SIZE; i++)
833 KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
834 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
839 struct raw_test_types {
842 enum regcache_type cache_type;
843 enum regmap_endian val_endian;
846 static void raw_to_desc(const struct raw_test_types *t, char *desc)
848 strcpy(desc, t->name);
851 static const struct raw_test_types raw_types_list[] = {
852 { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
853 { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
854 { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
855 { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
856 { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
857 { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
858 { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
859 { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
862 KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
864 static const struct raw_test_types raw_cache_types_list[] = {
865 { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
866 { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
867 { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
868 { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
869 { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
870 { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
873 KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
875 static const struct regmap_config raw_regmap_config = {
876 .max_register = BLOCK_TEST_SIZE,
878 .reg_format_endian = REGMAP_ENDIAN_LITTLE,
883 static struct regmap *gen_raw_regmap(struct regmap_config *config,
884 struct raw_test_types *test_type,
885 struct regmap_ram_data **data)
889 size_t size = (config->max_register + 1) * config->reg_bits / 8;
891 struct reg_default *defaults;
893 config->cache_type = test_type->cache_type;
894 config->val_format_endian = test_type->val_endian;
895 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
896 config->cache_type == REGCACHE_MAPLE;
898 buf = kmalloc(size, GFP_KERNEL);
900 return ERR_PTR(-ENOMEM);
902 get_random_bytes(buf, size);
904 *data = kzalloc(sizeof(**data), GFP_KERNEL);
906 return ERR_PTR(-ENOMEM);
907 (*data)->vals = (void *)buf;
909 config->num_reg_defaults = config->max_register + 1;
910 defaults = kcalloc(config->num_reg_defaults,
911 sizeof(struct reg_default),
914 return ERR_PTR(-ENOMEM);
915 config->reg_defaults = defaults;
917 for (i = 0; i < config->num_reg_defaults; i++) {
919 switch (test_type->val_endian) {
920 case REGMAP_ENDIAN_LITTLE:
921 defaults[i].def = le16_to_cpu(buf[i]);
923 case REGMAP_ENDIAN_BIG:
924 defaults[i].def = be16_to_cpu(buf[i]);
927 return ERR_PTR(-EINVAL);
932 * We use the defaults in the tests but they don't make sense
933 * to the core if there's no cache.
935 if (config->cache_type == REGCACHE_NONE)
936 config->num_reg_defaults = 0;
938 ret = regmap_init_raw_ram(config, *data);
947 static void raw_read_defaults_single(struct kunit *test)
949 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
951 struct regmap_config config;
952 struct regmap_ram_data *data;
956 config = raw_regmap_config;
958 map = gen_raw_regmap(&config, t, &data);
959 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
963 /* Check that we can read the defaults via the API */
964 for (i = 0; i < config.max_register + 1; i++) {
965 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
966 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
972 static void raw_read_defaults(struct kunit *test)
974 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
976 struct regmap_config config;
977 struct regmap_ram_data *data;
983 config = raw_regmap_config;
985 map = gen_raw_regmap(&config, t, &data);
986 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
990 val_len = sizeof(*rval) * (config.max_register + 1);
991 rval = kmalloc(val_len, GFP_KERNEL);
992 KUNIT_ASSERT_TRUE(test, rval != NULL);
996 /* Check that we can read the defaults via the API */
997 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
998 for (i = 0; i < config.max_register + 1; i++) {
999 def = config.reg_defaults[i].def;
1000 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1001 KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
1003 KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
1011 static void raw_write_read_single(struct kunit *test)
1013 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1015 struct regmap_config config;
1016 struct regmap_ram_data *data;
1020 config = raw_regmap_config;
1022 map = gen_raw_regmap(&config, t, &data);
1023 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1027 get_random_bytes(&val, sizeof(val));
1029 /* If we write a value to a register we can read it back */
1030 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1031 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1032 KUNIT_EXPECT_EQ(test, val, rval);
1037 static void raw_write(struct kunit *test)
1039 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1041 struct regmap_config config;
1042 struct regmap_ram_data *data;
1048 config = raw_regmap_config;
1050 map = gen_raw_regmap(&config, t, &data);
1051 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1055 hw_buf = (u16 *)data->vals;
1057 get_random_bytes(&val, sizeof(val));
1059 /* Do a raw write */
1060 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1062 /* We should read back the new values, and defaults for the rest */
1063 for (i = 0; i < config.max_register + 1; i++) {
1064 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1069 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1070 KUNIT_EXPECT_EQ(test, rval,
1071 be16_to_cpu(val[i % 2]));
1073 KUNIT_EXPECT_EQ(test, rval,
1074 le16_to_cpu(val[i % 2]));
1078 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1083 /* The values should appear in the "hardware" */
1084 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1089 static void raw_sync(struct kunit *test)
1091 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1093 struct regmap_config config;
1094 struct regmap_ram_data *data;
1100 config = raw_regmap_config;
1102 map = gen_raw_regmap(&config, t, &data);
1103 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1107 hw_buf = (u16 *)data->vals;
1109 get_random_bytes(&val, sizeof(val));
1111 /* Do a regular write and a raw write in cache only mode */
1112 regcache_cache_only(map, true);
1113 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1114 if (config.val_format_endian == REGMAP_ENDIAN_BIG)
1115 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
1116 be16_to_cpu(val[0])));
1118 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
1119 le16_to_cpu(val[0])));
1121 /* We should read back the new values, and defaults for the rest */
1122 for (i = 0; i < config.max_register + 1; i++) {
1123 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1129 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1130 KUNIT_EXPECT_EQ(test, rval,
1131 be16_to_cpu(val[i % 2]));
1133 KUNIT_EXPECT_EQ(test, rval,
1134 le16_to_cpu(val[i % 2]));
1138 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1143 /* The values should not appear in the "hardware" */
1144 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
1145 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
1147 for (i = 0; i < config.max_register + 1; i++)
1148 data->written[i] = false;
1151 regcache_cache_only(map, false);
1152 regcache_mark_dirty(map);
1153 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1155 /* The values should now appear in the "hardware" */
1156 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1157 KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
1162 static struct kunit_case regmap_test_cases[] = {
1163 KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
1164 KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
1165 KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
1166 KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
1167 KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
1168 KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
1169 KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
1170 KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
1171 KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
1172 KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
1173 KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
1174 KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
1175 KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
1176 KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
1177 KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
1178 KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
1179 KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
1181 KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
1182 KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
1183 KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
1184 KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
1185 KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
1189 static struct kunit_suite regmap_test_suite = {
1191 .test_cases = regmap_test_cases,
1193 kunit_test_suite(regmap_test_suite);
MODULE_LICENSE("GPL v2");